mirror of https://github.com/checkpoint-restore/criu.git
Run 'make indent' on header files
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Adrian Reber <areber@redhat.com>
This commit is contained in: parent 93dd984ca0, commit 70833bcf29
252 changed files with 4746 additions and 5011 deletions
@@ -5,11 +5,9 @@ typedef struct {
int counter;
} atomic_t;

/* Copied from the Linux header arch/arm/include/asm/barrier.h */

#define smp_mb() asm volatile("dmb ish" : : : "memory")

/* Copied from the Linux kernel header arch/arm64/include/asm/atomic.h */

@@ -25,20 +23,18 @@ static inline void atomic_set(atomic_t *v, int i)

#define atomic_get atomic_read

static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long tmp;
int result;

-asm volatile(
-"1: ldxr %w0, %2\n"
+asm volatile("1: ldxr %w0, %2\n"
" add %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
-: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-: "Ir" (i)
+: "=&r"(result), "=&r"(tmp), "+Q"(v->counter)
+: "Ir"(i)
: "cc", "memory");

smp_mb();
return result;

@@ -49,30 +45,38 @@ static inline int atomic_sub_return(int i, atomic_t *v)
unsigned long tmp;
int result;

-asm volatile(
-"1: ldxr %w0, %2\n"
+asm volatile("1: ldxr %w0, %2\n"
" sub %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
-: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-: "Ir" (i)
+: "=&r"(result), "=&r"(tmp), "+Q"(v->counter)
+: "Ir"(i)
: "cc", "memory");

smp_mb();
return result;
}

-static inline int atomic_inc(atomic_t *v) { return atomic_add_return(1, v) - 1; }
+static inline int atomic_inc(atomic_t *v)
+{
+return atomic_add_return(1, v) - 1;
+}

-static inline int atomic_add(int val, atomic_t *v) { return atomic_add_return(val, v) - val; }
+static inline int atomic_add(int val, atomic_t *v)
+{
+return atomic_add_return(val, v) - val;
+}

-static inline int atomic_dec(atomic_t *v) { return atomic_sub_return(1, v) + 1; }
+static inline int atomic_dec(atomic_t *v)
+{
+return atomic_sub_return(1, v) + 1;
+}

/* true if the result is 0, or false for all other cases. */
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_dec_return(v) (atomic_sub_return(1, v))

#define atomic_inc_return(v) (atomic_add_return(1, v))

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{

@@ -82,15 +86,15 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
smp_mb();

asm volatile("// atomic_cmpxchg\n"
"1: ldxr %w1, %2\n"
" cmp %w1, %w3\n"
" b.ne 2f\n"
" stxr %w0, %w4, %2\n"
" cbnz %w0, 1b\n"
"2:"
-: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
-: "Ir" (old), "r" (new)
+: "=&r"(tmp), "=&r"(oldval), "+Q"(ptr->counter)
+: "Ir"(old), "r"(new)
: "cc");

smp_mb();
return oldval;
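The ldxr/stlxr pairs above are load-linked/store-conditional loops: stlxr fails if another CPU wrote the word after the ldxr, and cbnz retries until the update really was atomic. As a rough usage sketch (not CRIU code), here is the same atomic_t API driving a reference count; a GCC __sync builtin stands in for the inline assembly so the snippet builds on any architecture:

#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* Portable stand-in for the asm version above: returns the new value. */
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __sync_add_and_fetch(&v->counter, i);
}
#define atomic_dec_and_test(v) (atomic_add_return(-1, v) == 0)

int main(void)
{
	atomic_t ref = { 2 };

	if (!atomic_dec_and_test(&ref))
		printf("still referenced: %d\n", ref.counter);	/* 1 */
	if (atomic_dec_and_test(&ref))
		printf("last reference dropped\n");
	return 0;
}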
@@ -3,22 +3,21 @@

#ifdef __ASSEMBLY__

#define __ALIGN .align 4, 0x00
#define __ALIGN_STR ".align 4, 0x00"

#define GLOBAL(name) \
.globl name; \
name:

#define ENTRY(name) \
.globl name; \
.type name, #function; \
__ALIGN; \
name:

-#define END(sym) \
-.size sym, . - sym
+#define END(sym) .size sym, .- sym

#endif /* __ASSEMBLY__ */

#endif /* __CR_LINKAGE_H__ */
@@ -29,11 +29,11 @@ static inline unsigned page_shift(void)
* on aarch64, then we need refrain using PAGE_SIZE in criu and use
* page_size() across sources (as it may differ on aarch64).
*/
#define PAGE_SIZE page_size()
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_SHIFT page_shift()

#define PAGE_PFN(addr) ((addr) / PAGE_SIZE)

#else /* CR_NOGLIBC */
@@ -7,12 +7,11 @@ typedef struct {
int counter;
} atomic_t;

/* Copied from the Linux kernel header arch/arm/include/asm/atomic.h */

#if defined(CONFIG_ARMV7)

-#define smp_mb() __asm__ __volatile__ ("dmb" : : : "memory")
+#define smp_mb() __asm__ __volatile__("dmb" : : : "memory")

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{

@@ -24,14 +23,14 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)

do {
__asm__ __volatile__("@ atomic_cmpxchg\n"
"ldrex %1, [%3]\n"
"mov %0, #0\n"
"teq %1, %4\n"
"it eq\n"
"strexeq %0, %5, [%3]\n"
-: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
-: "r" (&ptr->counter), "Ir" (old), "r" (new)
+: "=&r"(res), "=&r"(oldval), "+Qo"(ptr->counter)
+: "r"(&ptr->counter), "Ir"(old), "r"(new)
: "cc");
} while (res);

smp_mb();

@@ -43,7 +42,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)

/* SMP isn't supported for ARMv6 */

-#define smp_mb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory")
+#define smp_mb() __asm__ __volatile__("mcr p15, 0, %0, c7, c10, 5" : : "r"(0) : "memory")

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{

@@ -82,14 +81,14 @@ static inline int atomic_add_return(int i, atomic_t *v)
smp_mb();

__asm__ __volatile__("@ atomic_add_return\n"
"1: ldrex %0, [%3]\n"
" add %0, %0, %4\n"
" strex %1, %0, [%3]\n"
" teq %1, #0\n"
" bne 1b\n"
-: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-: "r" (&v->counter), "Ir" (i)
+: "=&r"(result), "=&r"(tmp), "+Qo"(v->counter)
+: "r"(&v->counter), "Ir"(i)
: "cc");

smp_mb();

@@ -104,30 +103,39 @@ static inline int atomic_sub_return(int i, atomic_t *v)
smp_mb();

__asm__ __volatile__("@ atomic_sub_return\n"
"1: ldrex %0, [%3]\n"
" sub %0, %0, %4\n"
" strex %1, %0, [%3]\n"
" teq %1, #0\n"
" bne 1b\n"
-: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-: "r" (&v->counter), "Ir" (i)
+: "=&r"(result), "=&r"(tmp), "+Qo"(v->counter)
+: "r"(&v->counter), "Ir"(i)
: "cc");

smp_mb();

return result;
}

-static inline int atomic_inc(atomic_t *v) { return atomic_add_return(1, v) - 1; }
+static inline int atomic_inc(atomic_t *v)
+{
+return atomic_add_return(1, v) - 1;
+}

-static inline int atomic_add(int val, atomic_t *v) { return atomic_add_return(val, v) - val; }
+static inline int atomic_add(int val, atomic_t *v)
+{
+return atomic_add_return(val, v) - val;
+}

-static inline int atomic_dec(atomic_t *v) { return atomic_sub_return(1, v) + 1; }
+static inline int atomic_dec(atomic_t *v)
+{
+return atomic_sub_return(1, v) + 1;
+}

/* true if the result is 0, or false for all other cases. */
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_dec_return(v) (atomic_sub_return(1, v))

#define atomic_inc_return(v) (atomic_add_return(1, v))

#endif /* __CR_ATOMIC_H__ */
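atomic_cmpxchg() returns the value it actually observed, so callers typically loop until their compare succeeds. A hedged sketch of that retry pattern follows; atomic_max() is an illustrative helper, not part of these headers, and a portable builtin replaces the ldrex/strex sequence:

#include <stdio.h>

/* Stand-in for the asm cmpxchg above: returns the value seen at *ptr. */
static int atomic_cmpxchg_sketch(int *ptr, int old, int new)
{
	return __sync_val_compare_and_swap(ptr, old, new);
}

/* Lift *v to at least val, losslessly racing with other updaters. */
static void atomic_max(int *v, int val)
{
	int cur = *v;

	while (cur < val) {
		int prev = atomic_cmpxchg_sketch(v, cur, val);
		if (prev == cur)
			break;		/* our store won the race */
		cur = prev;		/* lost the race; re-check and retry */
	}
}

int main(void)
{
	int hw = 3;

	atomic_max(&hw, 7);
	atomic_max(&hw, 5);	/* no effect, 7 is already larger */
	printf("%d\n", hw);	/* 7 */
	return 0;
}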
@@ -3,26 +3,25 @@

#ifdef __ASSEMBLY__

#define __ALIGN .align 4, 0x00
#define __ALIGN_STR ".align 4, 0x00"

#define GLOBAL(name) \
.globl name; \
name:

#define ENTRY(name) \
.globl name; \
.type name, #function; \
__ALIGN; \
name:

-#define END(sym) \
-.size sym, . - sym
+#define END(sym) .size sym, .- sym

#define ALIAS(sym_new, sym_old) \
.globl sym_new; \
.set sym_new, sym_old

#endif /* __ASSEMBLY__ */

#endif /* __CR_LINKAGE_H__ */
@@ -2,18 +2,18 @@
#define __CR_ASM_PAGE_H__

#ifndef PAGE_SHIFT
-# define PAGE_SHIFT 12
+#define PAGE_SHIFT 12
#endif

#ifndef PAGE_SIZE
-# define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
#endif

#ifndef PAGE_MASK
-# define PAGE_MASK (~(PAGE_SIZE - 1))
+#define PAGE_MASK (~(PAGE_SIZE - 1))
#endif

#define PAGE_PFN(addr) ((addr) / PAGE_SIZE)
#define page_size() PAGE_SIZE

#endif /* __CR_ASM_PAGE_H__ */
@@ -3,26 +3,21 @@

/* Copied from linux kernel arch/arm/include/asm/unified.h */

#define WASM(instr) #instr

/* Copied from linux kernel arch/arm/include/asm/processor.h */

#define __ALT_SMP_ASM(smp, up) \
"9998: " smp "\n" \
" .pushsection \".alt.smp.init\", \"a\"\n" \
" .long 9998b\n" \
" " up "\n" \
" .popsection\n"

static inline void prefetchw(const void *ptr)
{
__asm__ __volatile__(
-".arch_extension mp\n"
-__ALT_SMP_ASM(
-WASM(pldw) "\t%a0",
-WASM(pld) "\t%a0"
-)
-:: "p" (ptr));
+".arch_extension mp\n" __ALT_SMP_ASM(WASM(pldw) "\t%a0", WASM(pld) "\t%a0")::"p"(ptr));
}

#endif /* __CR_PROCESSOR_H__ */
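prefetchw() only hints that a cache line is about to be written: pldw on SMP-capable ARMv7, plain pld otherwise, patched at the 9998 label. A portable approximation, assuming GCC or Clang, is __builtin_prefetch with write intent; prefetchw_sketch() is a hypothetical name:

/* Hint-only: correctness must never depend on a prefetch happening. */
static inline void prefetchw_sketch(const void *ptr)
{
	__builtin_prefetch(ptr, 1, 3);	/* rw=1 (write), high temporal locality */
}

/* Typical use: warm the next node before the loop body modifies it, e.g.
 * prefetchw_sketch(node->next); ... write to *node ... */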
include/common/arch/mips/asm/atomic.h (91 lines changed, Executable file → Normal file)
@@ -12,7 +12,7 @@
*
* Atomically reads the value of @v.
*/
#define atomic_read(v) (*(volatile int *)&(v)->counter)

/*
* atomic_set - set atomic variable

@@ -21,7 +21,7 @@
*
* Atomically sets the value of @v to @i.
*/
#define atomic_set(v, i) ((v)->counter = (i))
/*
* atomic_add - add integer to atomic variable
* @i: integer value to add

@@ -30,20 +30,19 @@
* Atomically adds @i to @v.
*/

-static __inline__ void atomic_add(int i, atomic_t * v)
+static __inline__ void atomic_add(int i, atomic_t *v)
{
int temp;

do {
-__asm__ __volatile__(
-" .set mips3 \n"
+__asm__ __volatile__(" .set mips3 \n"
" ll %0, %1 # atomic_add \n"
" addu %0, %2 \n"
" sc %0, %1 \n"
" .set mips0 \n"
-: "=&r" (temp), "+m" (v->counter)
-: "Ir" (i));
+: "=&r"(temp), "+m"(v->counter)
+: "Ir"(i));
} while (unlikely(!temp));
}

/*

@@ -53,43 +52,40 @@ static __inline__ void atomic_add(int i, atomic_t * v)
*
* Atomically subtracts @i from @v.
*/
-static __inline__ void atomic_sub(int i, atomic_t * v)
+static __inline__ void atomic_sub(int i, atomic_t *v)
{
int temp;

do {
-__asm__ __volatile__(
-" .set mips3 \n"
+__asm__ __volatile__(" .set mips3 \n"
" ll %0, %1 # atomic_sub \n"
" subu %0, %2 \n"
" sc %0, %1 \n"
" .set mips0 \n"
-: "=&r" (temp), "+m" (v->counter)
-: "Ir" (i));
+: "=&r"(temp), "+m"(v->counter)
+: "Ir"(i));
} while (unlikely(!temp));
}

/*
* Same as above, but return the result value
*/
-static __inline__ int atomic_add_return(int i, atomic_t * v)
+static __inline__ int atomic_add_return(int i, atomic_t *v)
{
int result;

smp_mb__before_llsc();

int temp;

do {
-__asm__ __volatile__(
-" .set mips3 \n"
+__asm__ __volatile__(" .set mips3 \n"
" ll %1, %2 # atomic_add_return \n"
" addu %0, %1, %3 \n"
" sc %0, %2 \n"
" .set mips0 \n"
-: "=&r" (result), "=&r" (temp), "+m" (v->counter)
-: "Ir" (i));
+: "=&r"(result), "=&r"(temp), "+m"(v->counter)
+: "Ir"(i));
} while (unlikely(!result));

result = temp + i;

@@ -99,7 +95,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
return result;
}

-static __inline__ int atomic_sub_return(int i, atomic_t * v)
+static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
int result;

@@ -108,14 +104,13 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
int temp;

do {
-__asm__ __volatile__(
-" .set mips3 \n"
+__asm__ __volatile__(" .set mips3 \n"
" ll %1, %2 # atomic_sub_return \n"
" subu %0, %1, %3 \n"
" sc %0, %2 \n"
" .set mips0 \n"
-: "=&r" (result), "=&r" (temp), "+m" (v->counter)
-: "Ir" (i));
+: "=&r"(result), "=&r"(temp), "+m"(v->counter)
+: "Ir"(i));
} while (unlikely(!result));

result = temp - i;

@@ -126,8 +121,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
}

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))

/*
* atomic_inc - increment atomic variable

@@ -135,7 +130,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
*
* Atomically increments @v by 1.
*/
-#define atomic_inc( v) atomic_add(1, (v))
+#define atomic_inc(v) atomic_add(1, (v))

/*
* atomic_dec - decrement and test
@@ -13,24 +13,22 @@
* It also implies a memory barrier.
*/

-static inline int test_and_set_bit(unsigned long nr,
-volatile unsigned long *addr)
+static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
-unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
+unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
unsigned long temp = 0;
unsigned long res;
int bit = nr & 63UL;

do {
-__asm__ __volatile__(
-" .set mips3 \n"
+__asm__ __volatile__(" .set mips3 \n"
" lld %0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" scd %2, %1 \n"
" .set mips0 \n"
-: "=&r" (temp), "+m" (*m), "=&r" (res)
-: "r" (1UL << bit)
+: "=&r"(temp), "+m"(*m), "=&r"(res)
+: "r"(1UL << bit)
: "memory");
} while (unlikely(!res));

res = temp & (1UL << bit);
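Because test_and_set_bit() returns the bit's previous value with barrier semantics, it is enough to build a one-bit test-and-set lock. A sketch under the assumption that a GCC builtin may replace the lld/scd loop (64-bit words, as in the header); test_and_set_bit_sketch() is illustrative only:

#include <stdio.h>

/* Atomically OR the mask in and report whether the bit was already set. */
static int test_and_set_bit_sketch(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & 63);
	unsigned long old = __sync_fetch_and_or(&addr[nr >> 6], mask);

	return (old & mask) != 0;
}

int main(void)
{
	volatile unsigned long lock_word = 0;

	/* the first caller gets 0 back and owns the "lock" */
	printf("first:  %d\n", test_and_set_bit_sketch(0, &lock_word));	/* 0 */
	/* any later caller sees the bit already set */
	printf("second: %d\n", test_and_set_bit_sketch(0, &lock_word));	/* 1 */
	return 0;
}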
include/common/arch/mips/asm/bitsperlong.h (2 lines changed, Executable file → Normal file)
@@ -1,6 +1,6 @@
#ifndef __CR_BITSPERLONG_H__
#define __CR_BITSPERLONG_H__

-# define BITS_PER_LONG 64
+#define BITS_PER_LONG 64

#endif /* __CR_BITSPERLONG_H__ */
include/common/arch/mips/asm/cmpxchg.h (108 lines changed, Executable file → Normal file)
@@ -1,67 +1,65 @@
#ifndef __CR_CMPXCHG_H__
#define __CR_CMPXCHG_H__

#define __cmpxchg_asm(ld, st, m, old, new) \
({ \
__typeof(*(m)) __ret; \
\
if (kernel_uses_llsc) { \
-__asm__ __volatile__( \
-" .set push \n" \
+__asm__ __volatile__(" .set push \n" \
" .set noat \n" \
" .set mips3 \n" \
"1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" .set mips0 \n" \
" move $1, %z4 \n" \
" .set mips3 \n" \
" " st " $1, %1 \n" \
" beqz $1, 1b \n" \
" .set pop \n" \
"2: \n" \
-: "=&r" (__ret), "=R" (*m) \
-: "R" (*m), "Jr" (old), "Jr" (new) \
+: "=&r"(__ret), "=R"(*m) \
+: "R"(*m), "Jr"(old), "Jr"(new) \
: "memory"); \
} else { \
} \
\
__ret; \
})
/*
* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg().
*/
extern void __cmpxchg_called_with_bad_pointer(void);

#define __cmpxchg(ptr, old, new, pre_barrier, post_barrier) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
__typeof__(*(ptr)) __res = 0; \
\
pre_barrier; \
\
switch (sizeof(*(__ptr))) { \
case 4: \
__res = __cmpxchg_asm("ll", "sc", __ptr, __old, __new); \
break; \
case 8: \
if (sizeof(long) == 8) { \
-__res = __cmpxchg_asm("lld", "scd", __ptr, \
-__old, __new); \
+__res = __cmpxchg_asm("lld", "scd", __ptr, __old, __new); \
break; \
} \
default: \
__cmpxchg_called_with_bad_pointer(); \
break; \
} \
\
post_barrier; \
\
__res; \
})

#define cmpxchg(ptr, old, new) __cmpxchg(ptr, old, new, smp_mb__before_llsc(), smp_llsc_mb())

#endif /* __CR_CMPXCHG_H__ */
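The cmpxchg() macro is typically used to claim something exactly once: only the caller that observes the old value gets to proceed. A minimal sketch, with __sync_val_compare_and_swap standing in for the LL/SC macro and run_once() as a hypothetical helper:

#include <stdio.h>

static int once_flag;

static void init(void)
{
	puts("initialized");
}

static void run_once(void (*fn)(void))
{
	/* claim the flag 0 -> 1; only the winner sees the old value 0 */
	if (__sync_val_compare_and_swap(&once_flag, 0, 1) == 0)
		fn();
}

int main(void)
{
	run_once(init);
	run_once(init);	/* second call is a no-op */
	return 0;
}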
@@ -1,58 +1,56 @@
#ifndef __CR_LINKAGE_H__
#define __CR_LINKAGE_H__

#define zero $0 /* wired zero */
#define AT $1 /* assembler temp - uppercase because of ".set at" */
#define v0 $2
#define v1 $3

#define a0 $4
#define a1 $5
#define a2 $6
#define a3 $7
#define a4 $8
#define a5 $9
#define a6 $10
#define a7 $11
#define t0 $12
#define t1 $13
#define t2 $14
#define t3 $15

#define s0 $16 /* callee saved */
#define s1 $17
#define s2 $18
#define s3 $19
#define s4 $20
#define s5 $21
#define s6 $22
#define s7 $23
#define t8 $24 /* caller saved */
#define t9 $25
#define jp $25 /* PIC jump register */
#define k0 $26 /* kernel scratch */
#define k1 $27
#define gp $28 /* global pointer */
#define sp $29 /* stack pointer */
#define fp $30 /* frame pointer */
#define s8 $30 /* same like fp! */
#define ra $31 /* return address */

#define __ALIGN .align 8
#define __ALIGN_STR ".align 8"

#define GLOBAL(name) \
.globl name; \
name:

#define ENTRY(name) \
.globl name; \
__ALIGN; \
.type name, @function; \
name:

-#define END(sym) \
-.size sym, . - sym
+#define END(sym) .size sym, .- sym

#endif /* __CR_LINKAGE_H__ */
include/common/arch/mips/asm/page.h (10 lines changed, Executable file → Normal file)
@@ -13,7 +13,7 @@ static unsigned __page_shift;
static inline unsigned page_size(void)
{
if (!__page_size)
__page_size = sysconf(_SC_PAGESIZE);
return __page_size;
}

@@ -24,11 +24,11 @@ static inline unsigned page_shift(void)
return __page_shift;
}

#define PAGE_SIZE page_size()
#define PAGE_SHIFT page_shift()
#define PAGE_MASK (~(PAGE_SIZE - 1))

#define PAGE_PFN(addr) ((addr) / PAGE_SIZE)
#else /* CR_NOGLIBC */

extern unsigned page_size(void);
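Since PAGE_SIZE expands to a page_size() call here, PAGE_PFN() divides by a value fetched from the kernel at run time and cached on first use. A small worked example of the two conversions the macros perform (illustrative address; the printed values assume a 4 KiB page):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned long page = (unsigned long)sysconf(_SC_PAGESIZE);
	unsigned long addr = 0x7f0000003123UL;

	/* PAGE_PFN(addr): which page frame the address falls in */
	printf("pfn   = %lu\n", addr / page);
	/* PAGE_MASK-style rounding down to the start of the page */
	printf("start = %#lx\n", addr & ~(page - 1));
	return 0;
}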
@@ -1,24 +1,22 @@
#ifndef __UTILS_H__
#define __UTILS_H__

-# define kernel_uses_llsc 1
+#define kernel_uses_llsc 1

typedef struct {
int counter;
-}atomic_t;
+} atomic_t;

/*
* FIXME: detect with compel_cpu_has_feature() if LL/SC implicitly
* provide a memory barrier.
*/
#define __WEAK_LLSC_MB " sync \n"

-#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : : "memory")

#define smp_mb__before_llsc() smp_llsc_mb()
#define smp_mb__before_atomic() smp_mb__before_llsc()
#define smp_mb__after_atomic() smp_llsc_mb()

#endif /* __UTILS_H__ */
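smp_llsc_mb() emits a full MIPS sync; the point of such a barrier is to order a data store before the flag store that publishes it. A minimal sketch, assuming __sync_synchronize() as a portable stand-in for the sync instruction:

static int payload;
static volatile int ready;

static void publish(int value)
{
	payload = value;
	__sync_synchronize();	/* full barrier: payload visible before ready */
	ready = 1;
}

static int consume(void)
{
	while (!ready)
		;		/* spin until published */
	__sync_synchronize();	/* pair the barrier on the reader side */
	return payload;
}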
@@ -13,10 +13,13 @@ typedef struct {

#include "common/arch/ppc64/asm/cmpxchg.h"

#define PPC_ATOMIC_ENTRY_BARRIER "lwsync \n"
#define PPC_ATOMIC_EXIT_BARRIER "sync \n"

-#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC_INIT(i) \
+{ \
+(i) \
+}

static __inline__ int atomic_read(const atomic_t *v)
{

@@ -32,6 +35,7 @@ static __inline__ void atomic_set(atomic_t *v, int i)
__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

+/* clang-format off */
#define ATOMIC_OP(op, asm_op) \
static __inline__ void atomic_##op(int a, atomic_t *v) \
{ \

@@ -124,10 +128,11 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)

return t;
}
+/* clang-format on */

/* true if the result is 0, or false for all other cases. */
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_dec_return(v) (atomic_sub_return(1, v))

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
@@ -42,37 +42,36 @@

#include "common/asm/bitsperlong.h"

-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG)

-#define DECLARE_BITMAP(name,bits) \
-unsigned long name[BITS_TO_LONGS(bits)]
+#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#define __stringify_in_c(...) #__VA_ARGS__
#define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "

#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)

/* PPC bit number conversion */
#define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be))
#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

#define PPC_INST_LDARX 0x7c0000a8
-#define ___PPC_RA(a) (((a) & 0x1f) << 16)
-#define ___PPC_RB(b) (((b) & 0x1f) << 11)
-#define ___PPC_RS(s) (((s) & 0x1f) << 21)
-#define __PPC_EH(eh) (((eh) & 0x1) << 0)
+#define ___PPC_RA(a) (((a)&0x1f) << 16)
+#define ___PPC_RB(b) (((b)&0x1f) << 11)
+#define ___PPC_RS(s) (((s)&0x1f) << 21)
+#define __PPC_EH(eh) (((eh)&0x1) << 0)
#define ___PPC_RT(t) ___PPC_RS(t)

-#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \
-___PPC_RT(t) | ___PPC_RA(a) | \
-___PPC_RB(b) | __PPC_EH(eh))
+#define PPC_LDARX(t, a, b, eh) \
+stringify_in_c(.long PPC_INST_LDARX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh))
#define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh)

+/* clang-format off */
/* Macro for generating the ***_bits() functions */
#define DEFINE_BITOP(fn, op) \
static __inline__ void fn(unsigned long mask, \
volatile unsigned long *_p) \
{ \

@@ -87,6 +86,7 @@ static __inline__ void fn(unsigned long mask, \
: "r" (mask), "r" (p) \
: "cc", "memory"); \
}
+/* clang-format on */

DEFINE_BITOP(set_bits, or)
DEFINE_BITOP(clear_bits, andc)

@@ -94,26 +94,27 @@ DEFINE_BITOP(change_bits, xor)

static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
set_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}

static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}

static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
change_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}

static inline int test_bit(int nr, const volatile unsigned long *addr)
{
-return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)));
}

/* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
* operands. */
+/* clang-format off */
#define DEFINE_TESTOP(fn, op, prefix, postfix, eh) \
static __inline__ unsigned long fn( \
unsigned long mask, \

@@ -133,11 +134,11 @@ static __inline__ unsigned long fn( \
: "cc", "memory"); \
return (old & mask); \
}
+/* clang-format on */

DEFINE_TESTOP(test_and_set_bits, or, "\nLWSYNC\n", "\nsync\n", 0)

-static __inline__ int test_and_set_bit(unsigned long nr,
-volatile unsigned long *addr)
+static __inline__ int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}

@@ -146,70 +147,63 @@ static __inline__ int test_and_set_bit(unsigned long nr,
* Return the zero-based bit position (LE, not IBM bit numbering) of
* the most significant 1-bit in a double word.
*/
-static __inline__ __attribute__((const))
-int __ilog2(unsigned long x)
+static __inline__ __attribute__((const)) int __ilog2(unsigned long x)
{
int lz;

-asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x));
+asm("cntlzd %0,%1" : "=r"(lz) : "r"(x));
return BITS_PER_LONG - 1 - lz;
}

static __inline__ unsigned long __ffs(unsigned long x)
{
return __ilog2(x & -x);
}

#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
/*
* Find the next set bit in a memory region.
*/
-static inline
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
-unsigned long offset)
+static inline unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
-unsigned long result = offset & ~(BITS_PER_LONG-1);
+unsigned long result = offset & ~(BITS_PER_LONG - 1);
unsigned long tmp;

if (offset >= size)
return size;
size -= result;
offset %= BITS_PER_LONG;
if (offset) {
tmp = *(p++);
tmp &= (~0UL << offset);
if (size < BITS_PER_LONG)
goto found_first;
if (tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
-while (size & ~(BITS_PER_LONG-1)) {
+while (size & ~(BITS_PER_LONG - 1)) {
if ((tmp = *(p++)))
goto found_middle;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = *p;

found_first:
tmp &= (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:
return result + __ffs(tmp);
}

#define for_each_bit(i, bitmask) \
-for (i = find_next_bit(bitmask, sizeof(bitmask), 0); \
-i < sizeof(bitmask); \
+for (i = find_next_bit(bitmask, sizeof(bitmask), 0); i < sizeof(bitmask); \
i = find_next_bit(bitmask, sizeof(bitmask), i + 1))

#endif /* __CR_BITOPS_H__ */
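__ffs() (here built on cntlzd) gives the index of the lowest set bit, and find_next_bit() repeatedly strips the bits it has already reported. The same walk in portable C, assuming __builtin_ctzl in place of the PowerPC instruction:

#include <stdio.h>

static unsigned long ffs_sketch(unsigned long x)
{
	return (unsigned long)__builtin_ctzl(x);	/* index of lowest set bit */
}

int main(void)
{
	unsigned long word = (1UL << 1) | (1UL << 5) | (1UL << 9);

	while (word) {
		printf("bit %lu set\n", ffs_sketch(word));	/* 1, 5, 9 */
		word &= word - 1;	/* clear the lowest set bit */
	}
	return 0;
}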
@@ -5,54 +5,44 @@
* Copied from kernel header file arch/powerpc/include/asm/cmpxchg.h
*/

#define PPC_ACQUIRE_BARRIER "isync \n"
#define PPC_RELEASE_BARRIER "lwsync \n"

/*
* Compare and exchange - if *p == old, set it to new,
* and return the old value of *p.
*/

-static __always_inline unsigned long
-__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
+static __always_inline unsigned long __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
unsigned int prev;

-__asm__ __volatile__ (
-PPC_RELEASE_BARRIER \
-"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+__asm__ __volatile__(PPC_RELEASE_BARRIER "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
cmpw 0,%0,%3\n\
bne- 2f\n"
" stwcx. %4,0,%2\n\
-bne- 1b \n" \
-PPC_ACQUIRE_BARRIER
-"\n\
+bne- 1b \n" PPC_ACQUIRE_BARRIER "\n\
2:"
-: "=&r" (prev), "+m" (*p)
-: "r" (p), "r" (old), "r" (new)
+: "=&r"(prev), "+m"(*p)
+: "r"(p), "r"(old), "r"(new)
: "cc", "memory");

return prev;
}

-static __always_inline unsigned long
-__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
+static __always_inline unsigned long __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
unsigned long prev;

-__asm__ __volatile__ (
-PPC_RELEASE_BARRIER \
-"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+__asm__ __volatile__(PPC_RELEASE_BARRIER "1: ldarx %0,0,%2 # __cmpxchg_u64\n\
cmpd 0,%0,%3\n\
bne- 2f\n\
stdcx. %4,0,%2\n\
-bne- 1b \n" \
-PPC_ACQUIRE_BARRIER
-"\n\
+bne- 1b \n" PPC_ACQUIRE_BARRIER "\n\
2:"
-: "=&r" (prev), "+m" (*p)
-: "r" (p), "r" (old), "r" (new)
+: "=&r"(prev), "+m"(*p)
+: "r"(p), "r"(old), "r"(new)
: "cc", "memory");

return prev;
}

@@ -62,18 +52,18 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
#ifdef CR_DEBUG
static inline void __cmpxchg_called_with_bad_pointer(void)
{
-__asm__ __volatile__ (
-"1: twi 31,0,0 # trap\n"
+__asm__ __volatile__("1: twi 31,0,0 # trap\n"
" b 1b"
-: : : "memory");
+:
+:
+: "memory");
}
#else
extern void __cmpxchg_called_with_bad_pointer(void);
#endif

-static __always_inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+static __always_inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
unsigned int size)
{
switch (size) {
case 4:

@@ -85,12 +75,11 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
return old;
}

#define cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
-(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
-(unsigned long)_n_, sizeof(*(ptr))); \
+(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)_o_, (unsigned long)_n_, sizeof(*(ptr))); \
})

#endif /* __CR_CMPXCHG_H__ */
@@ -10,36 +10,33 @@

#ifdef __ASSEMBLY__

#define GLOBAL(name) \
.globl name; \
name:

#define ENTRY(name) \
.globl name; \
.type name, @function; \
name:

-#define END(sym) \
-.size sym, . - sym
+#define END(sym) .size sym, .- sym

#define STACKFRAMESIZE 256
-#define __STK_REG(i) (112 + ((i)-14)*8)
+#define __STK_REG(i) (112 + ((i)-14) * 8)
#define STK_REG(i) __STK_REG(__REG_##i)

/* The boring bits... */

/* Condition Register Bit Fields */

#define cr0 0
#define cr1 1
#define cr2 2
#define cr3 3
#define cr4 4
#define cr5 5
#define cr6 6
#define cr7 7

/*
* General Purpose Registers (GPRs)

@@ -49,6 +46,7 @@
* Use R0-31 only when really nessesary.
*/

+/* clang-format off */
#define r0 %r0
#define r1 %r1
#define r2 %r2

@@ -81,221 +79,219 @@
#define r29 %r29
#define r30 %r30
#define r31 %r31
+/* clang-format on */

/* Floating Point Registers (FPRs) */

#define fr0 0
#define fr1 1
#define fr2 2
#define fr3 3
#define fr4 4
#define fr5 5
#define fr6 6
#define fr7 7
#define fr8 8
#define fr9 9
#define fr10 10
#define fr11 11
#define fr12 12
#define fr13 13
#define fr14 14
#define fr15 15
#define fr16 16
#define fr17 17
#define fr18 18
#define fr19 19
#define fr20 20
#define fr21 21
#define fr22 22
#define fr23 23
#define fr24 24
#define fr25 25
#define fr26 26
#define fr27 27
#define fr28 28
#define fr29 29
#define fr30 30
#define fr31 31

/* AltiVec Registers (VPRs) */

#define vr0 0
#define vr1 1
#define vr2 2
#define vr3 3
#define vr4 4
#define vr5 5
#define vr6 6
#define vr7 7
#define vr8 8
#define vr9 9
#define vr10 10
#define vr11 11
#define vr12 12
#define vr13 13
#define vr14 14
#define vr15 15
#define vr16 16
#define vr17 17
#define vr18 18
#define vr19 19
#define vr20 20
#define vr21 21
#define vr22 22
#define vr23 23
#define vr24 24
#define vr25 25
#define vr26 26
#define vr27 27
#define vr28 28
#define vr29 29
#define vr30 30
#define vr31 31

/* VSX Registers (VSRs) */

#define vsr0 0
#define vsr1 1
#define vsr2 2
#define vsr3 3
#define vsr4 4
#define vsr5 5
#define vsr6 6
#define vsr7 7
#define vsr8 8
#define vsr9 9
#define vsr10 10
#define vsr11 11
#define vsr12 12
#define vsr13 13
#define vsr14 14
#define vsr15 15
#define vsr16 16
#define vsr17 17
#define vsr18 18
#define vsr19 19
#define vsr20 20
#define vsr21 21
#define vsr22 22
#define vsr23 23
#define vsr24 24
#define vsr25 25
#define vsr26 26
#define vsr27 27
#define vsr28 28
#define vsr29 29
#define vsr30 30
#define vsr31 31
#define vsr32 32
#define vsr33 33
#define vsr34 34
#define vsr35 35
#define vsr36 36
#define vsr37 37
#define vsr38 38
#define vsr39 39
#define vsr40 40
#define vsr41 41
#define vsr42 42
#define vsr43 43
#define vsr44 44
#define vsr45 45
#define vsr46 46
#define vsr47 47
#define vsr48 48
#define vsr49 49
#define vsr50 50
#define vsr51 51
#define vsr52 52
#define vsr53 53
#define vsr54 54
#define vsr55 55
#define vsr56 56
#define vsr57 57
#define vsr58 58
#define vsr59 59
#define vsr60 60
#define vsr61 61
#define vsr62 62
#define vsr63 63

/* SPE Registers (EVPRs) */

#define evr0 0
#define evr1 1
#define evr2 2
#define evr3 3
#define evr4 4
#define evr5 5
#define evr6 6
#define evr7 7
#define evr8 8
#define evr9 9
#define evr10 10
#define evr11 11
#define evr12 12
#define evr13 13
#define evr14 14
#define evr15 15
#define evr16 16
#define evr17 17
#define evr18 18
#define evr19 19
#define evr20 20
#define evr21 21
#define evr22 22
#define evr23 23
#define evr24 24
#define evr25 25
#define evr26 26
#define evr27 27
#define evr28 28
#define evr29 29
#define evr30 30
#define evr31 31

/* some stab codes */
#define N_FUN 36
#define N_RSYM 64
#define N_SLINE 68
#define N_SO 100

#define __REG_R0 0
#define __REG_R1 1
#define __REG_R2 2
#define __REG_R3 3
#define __REG_R4 4
#define __REG_R5 5
#define __REG_R6 6
#define __REG_R7 7
#define __REG_R8 8
#define __REG_R9 9
#define __REG_R10 10
#define __REG_R11 11
#define __REG_R12 12
#define __REG_R13 13
#define __REG_R14 14
#define __REG_R15 15
#define __REG_R16 16
#define __REG_R17 17
#define __REG_R18 18
#define __REG_R19 19
#define __REG_R20 20
#define __REG_R21 21
#define __REG_R22 22
#define __REG_R23 23
#define __REG_R24 24
#define __REG_R25 25
#define __REG_R26 26
#define __REG_R27 27
#define __REG_R28 28
#define __REG_R29 29
#define __REG_R30 30
#define __REG_R31 31

#endif /* __ASSEMBLY__ */

#endif /* __CR_LINKAGE_H__ */
@@ -29,11 +29,11 @@ static inline unsigned page_shift(void)
* on ppc64, then we need refrain using PAGE_SIZE in criu and use
* page_size() across sources (as it may differ on ppc64).
*/
#define PAGE_SIZE page_size()
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_SHIFT page_shift()

#define PAGE_PFN(addr) ((addr) / PAGE_SIZE)

#else /* CR_NOGLIBC */
@@ -4,7 +4,10 @@
#include "common/arch/s390/asm/atomic_ops.h"
#include "common/compiler.h"

-#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC_INIT(i) \
+{ \
+(i) \
+}

typedef struct {
int counter;

@@ -14,17 +17,13 @@ static inline int atomic_read(const atomic_t *v)
{
int c;

-asm volatile(
-" l %0,%1\n"
-: "=d" (c) : "Q" (v->counter));
+asm volatile(" l %0,%1\n" : "=d"(c) : "Q"(v->counter));
return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
-asm volatile(
-" st %1,%0\n"
-: "=Q" (v->counter) : "d" (i));
+asm volatile(" st %1,%0\n" : "=Q"(v->counter) : "d"(i));
}

static inline int atomic_add_return(int i, atomic_t *v)

@@ -32,25 +31,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
return __atomic_add_barrier(i, &v->counter) + i;
}

static inline void atomic_add(int i, atomic_t *v)
{
__atomic_add(i, &v->counter);
}

#define atomic_inc(_v) atomic_add(1, _v)
#define atomic_inc_return(_v) atomic_add_return(1, _v)
#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v)
#define atomic_dec(_v) atomic_sub(1, _v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)

#define ATOMIC_OPS(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
__atomic_##op(i, &v->counter); \
-} \
+}

ATOMIC_OPS(and)
ATOMIC_OPS(or)

@@ -64,4 +62,3 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
}

#endif /* __ARCH_S390_ATOMIC__ */
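Note how atomic_add_return() derives the new value from the old one the barrier op hands back: __atomic_add_barrier(i, p) + i. A quick check of that identity, using __sync_fetch_and_add (which likewise returns the old value) as a portable stand-in for the s390 asm:

#include <assert.h>

/* Stand-in for __atomic_add_barrier(): returns the value before the add. */
static int add_barrier_sketch(int i, int *p)
{
	return __sync_fetch_and_add(p, i);
}

int main(void)
{
	int counter = 40;
	int new_val = add_barrier_sketch(2, &counter) + 2;	/* old + i */

	assert(new_val == 42 && counter == 42);
	return 0;
}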
@@ -1,6 +1,7 @@
#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__

/* clang-format off */
#define __ATOMIC_OP(op_name, op_string) \
static inline int op_name(int val, int *ptr) \
{ \

@@ -15,18 +16,20 @@ static inline int op_name(int val, int *ptr) \
        : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
        return old; \
}
/* clang-format on */

#define __ATOMIC_OPS(op_name, op_string) \
        __ATOMIC_OP(op_name, op_string) \
        __ATOMIC_OP(op_name##_barrier, op_string)

__ATOMIC_OPS(__atomic_add, "ar")
__ATOMIC_OPS(__atomic_and, "nr")
__ATOMIC_OPS(__atomic_or, "or")
__ATOMIC_OPS(__atomic_xor, "xr")

#undef __ATOMIC_OPS

/* clang-format off */
#define __ATOMIC64_OP(op_name, op_string) \
static inline long op_name(long val, long *ptr) \
{ \

@@ -41,33 +44,34 @@ static inline long op_name(long val, long *ptr) \
        : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
        return old; \
}
/* clang-format on */

#define __ATOMIC64_OPS(op_name, op_string) \
        __ATOMIC64_OP(op_name, op_string) \
        __ATOMIC64_OP(op_name##_barrier, op_string)

__ATOMIC64_OPS(__atomic64_add, "agr")
__ATOMIC64_OPS(__atomic64_and, "ngr")
__ATOMIC64_OPS(__atomic64_or, "ogr")
__ATOMIC64_OPS(__atomic64_xor, "xgr")

#undef __ATOMIC64_OPS

static inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
        asm volatile(" cs %[old],%[new],%[ptr]"
                     : [old] "+d"(old), [ptr] "+Q"(*ptr)
                     : [new] "d"(new)
                     : "cc", "memory");
        return old;
}

static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
        asm volatile(" csg %[old],%[new],%[ptr]"
                     : [old] "+d"(old), [ptr] "+Q"(*ptr)
                     : [new] "d"(new)
                     : "cc", "memory");
        return old;
}
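The cs/csg-based compare-and-swap above returns the value that was in memory, so a caller detects success by comparing the return value with the expected old value. A minimal retry-loop sketch (the counter and helper are illustrative, not part of this header):

static int counter;

static int bounded_inc(int limit)
{
        int old, new;

        do {
                old = counter;
                if (old >= limit)
                        return -1;      /* already at the limit */
                new = old + 1;
                /* __atomic_cmpxchg() returns the previous value of *ptr */
        } while (__atomic_cmpxchg(&counter, old, new) != old);

        return new;
}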
@@ -5,15 +5,13 @@
#include "common/compiler.h"
#include "common/arch/s390/asm/atomic_ops.h"

#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG)
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

static inline unsigned long *__bitops_word(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

@@ -21,8 +19,7 @@ __bitops_word(unsigned long nr, volatile unsigned long *ptr)
        return (unsigned long *)addr;
}

static inline unsigned char *__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
{
        return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
}

@@ -33,7 +30,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
        unsigned long mask;

        mask = 1UL << (nr & (BITS_PER_LONG - 1));
        __atomic64_or((long)mask, (long *)addr);
}

static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)

@@ -42,7 +39,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
        unsigned long mask;

        mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
        __atomic64_and((long)mask, (long *)addr);
}

static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)

@@ -51,17 +48,16 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
        unsigned long mask;

        mask = 1UL << (nr & (BITS_PER_LONG - 1));
        __atomic64_xor((long)mask, (long *)addr);
}

static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long *addr = __bitops_word(nr, ptr);
        unsigned long old, mask;

        mask = 1UL << (nr & (BITS_PER_LONG - 1));
        old = __atomic64_or_barrier((long)mask, (long *)addr);
        return (old & mask) != 0;
}

@@ -118,9 +114,8 @@ static inline unsigned long __ffs(unsigned long word)

#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))

static inline unsigned long _find_next_bit(const unsigned long *addr, unsigned long nbits, unsigned long start,
                                           unsigned long invert)
{
        unsigned long tmp;

@@ -143,16 +138,13 @@ static inline unsigned long _find_next_bit(const unsigned long *addr,
        return min(start + __ffs(tmp), nbits);
}

static inline unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
        return _find_next_bit(addr, size, offset, 0UL);
}

#define for_each_bit(i, bitmask) \
        for (i = find_next_bit(bitmask, sizeof(bitmask), 0); i < sizeof(bitmask); \
             i = find_next_bit(bitmask, sizeof(bitmask), i + 1))

#endif /* _S390_BITOPS_H */
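A short usage sketch for the bitmap helpers above (caller-side code, names invented):

#include <stdio.h>

static DECLARE_BITMAP(fd_map, 256);     /* 256 bits, BITS_TO_LONGS(256) longs */

static void mark_and_scan(void)
{
        unsigned long i;

        set_bit(3, fd_map);
        if (!test_and_set_bit(42, fd_map))
                printf("bit 42 was clear before\n");

        /* walk all set bits: 3 and 42 */
        for (i = find_next_bit(fd_map, 256, 0); i < 256;
             i = find_next_bit(fd_map, 256, i + 1))
                printf("bit %lu is set\n", i);
}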
@@ -5,18 +5,17 @@

#define __ALIGN .align 4, 0x07

#define GLOBAL(name) \
        .globl name; \
        name:

#define ENTRY(name) \
        .globl name; \
        .type name, @function; \
        __ALIGN; \
        name:

#define END(name) .size name, .- name

#endif /* __ASSEMBLY__ */
#endif
@@ -2,18 +2,18 @@
#define __CR_ASM_PAGE_H__

#ifndef PAGE_SHIFT
#define PAGE_SHIFT 12
#endif

#ifndef PAGE_SIZE
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#endif

#ifndef PAGE_MASK
#define PAGE_MASK (~(PAGE_SIZE - 1))
#endif

#define PAGE_PFN(addr) ((addr) / PAGE_SIZE)
#define page_size() PAGE_SIZE

#endif /* __CR_ASM_PAGE_H__ */
@@ -2,27 +2,27 @@
#define __CR_ASM_H__

#ifdef __GCC_ASM_FLAG_OUTPUTS__
#define CC_SET(c) "\n\t/* output condition code " #c "*/\n"
#define CC_OUT(c) "=@cc" #c
#else
#define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n"
#define CC_OUT(c) [_cc_##c] "=qm"
#endif

#ifdef __ASSEMBLY__
#define __ASM_FORM(x) x
#else
#define __ASM_FORM(x) " " #x " "
#endif

#ifndef __x86_64__
/* 32 bit */
#define __ASM_SEL(a, b) __ASM_FORM(a)
#else
/* 64 bit */
#define __ASM_SEL(a, b) __ASM_FORM(b)
#endif

#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, inst##q##__VA_ARGS__)

#endif /* __CR_ASM_H__ */
@@ -7,7 +7,10 @@ typedef struct {
        int counter;
} atomic_t;

#define ATOMIC_INIT(i) \
        { \
                (i) \
        }

static inline int atomic_read(const atomic_t *v)
{

@@ -21,37 +24,29 @@ static inline void atomic_set(atomic_t *v, int i)

static inline void atomic_add(int i, atomic_t *v)
{
        asm volatile(LOCK_PREFIX "addl %1,%0" : "+m"(v->counter) : "ir"(i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
        asm volatile(LOCK_PREFIX "subl %1,%0" : "+m"(v->counter) : "ir"(i));
}

static inline void atomic_inc(atomic_t *v)
{
        asm volatile(LOCK_PREFIX "incl %0" : "+m"(v->counter));
}

static inline void atomic_dec(atomic_t *v)
{
        asm volatile(LOCK_PREFIX "decl %0" : "+m"(v->counter));
}

static inline int atomic_dec_and_test(atomic_t *v)
{
        unsigned char c;

        asm volatile(LOCK_PREFIX "decl %0; sete %1" : "+m"(v->counter), "=qm"(c) : : "memory");
        return c != 0;
}

@@ -65,8 +60,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
        return atomic_add_return(-i, v);
}

#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
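A typical pattern built on these primitives is reference counting; atomic_dec_and_test() returns true only when the counter reaches zero, so exactly one caller frees the object. A sketch with invented types, not CRIU code:

#include <stdlib.h>

struct obj {
        atomic_t refcnt;
};

static void obj_get(struct obj *o)
{
        atomic_inc(&o->refcnt);
}

static void obj_put(struct obj *o)
{
        /* true only for the caller that drops the last reference */
        if (atomic_dec_and_test(&o->refcnt))
                free(o);
}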
@@ -6,47 +6,46 @@
#include "common/arch/x86/asm/asm.h"
#include "common/asm/bitsperlong.h"

#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG)

#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m"(*(volatile long *)(x))
#else
#define BITOP_ADDR(x) "+m"(*(volatile long *)(x))
#endif

#define ADDR BITOP_ADDR(addr)

static inline void set_bit(long nr, volatile unsigned long *addr)
{
        asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir"(nr) : "memory");
}

static inline void change_bit(long nr, volatile unsigned long *addr)
{
        asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir"(nr));
}

static inline bool test_bit(long nr, volatile const unsigned long *addr)
{
        bool oldbit;

        asm volatile(__ASM_SIZE(bt) " %2,%1" CC_SET(c)
                     : CC_OUT(c)(oldbit)
                     : "m"(*(unsigned long *)addr), "Ir"(nr)
                     : "memory");

        return oldbit;
}

static inline void clear_bit(long nr, volatile unsigned long *addr)
{
        asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir"(nr));
}

/**

@@ -61,10 +60,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
        bool oldbit;

        asm(__ASM_SIZE(bts) " %2,%1" CC_SET(c) : CC_OUT(c)(oldbit) : "m"(*(unsigned long *)addr), "Ir"(nr) : "memory");
        return oldbit;
}

@@ -76,23 +72,19 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 */
static inline unsigned long __ffs(unsigned long word)
{
        asm("bsf %1,%0" : "=r"(word) : "rm"(word));
        return word;
}

#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)

/*
 * Find the next set bit in a memory region.
 */
static inline unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
        const unsigned long *p = addr + BITOP_WORD(offset);
        unsigned long result = offset & ~(BITS_PER_LONG - 1);
        unsigned long tmp;

        if (offset >= size)

@@ -109,7 +101,7 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
                size -= BITS_PER_LONG;
                result += BITS_PER_LONG;
        }
        while (size & ~(BITS_PER_LONG - 1)) {
                if ((tmp = *(p++)))
                        goto found_middle;
                result += BITS_PER_LONG;

@@ -121,15 +113,14 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,

found_first:
        tmp &= (~0UL >> (BITS_PER_LONG - size));
        if (tmp == 0UL)               /* Are any bits set? */
                return result + size; /* Nope. */
found_middle:
        return result + __ffs(tmp);
}

#define for_each_bit(i, bitmask) \
        for (i = find_next_bit(bitmask, sizeof(bitmask), 0); i < sizeof(bitmask); \
             i = find_next_bit(bitmask, sizeof(bitmask), i + 1))

#endif /* __CR_BITOPS_H__ */
@@ -2,9 +2,9 @@
#define __CR_BITSPERLONG_H__

#ifdef CONFIG_X86_64
#define BITS_PER_LONG 64
#else
#define BITS_PER_LONG 32
#endif

#endif /* __CR_BITSPERLONG_H__ */
@@ -5,46 +5,38 @@

#define LOCK_PREFIX "\n\tlock; "

#define __X86_CASE_B 1
#define __X86_CASE_W 2
#define __X86_CASE_L 4
#define __X86_CASE_Q 8

/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value. Make sure you never reach non-case statement
 * here, otherwise behaviour is undefined.
 */
#define __xchg_op(ptr, arg, op, lock) \
        ({ \
                __typeof__(*(ptr)) __ret = (arg); \
                switch (sizeof(*(ptr))) { \
                case __X86_CASE_B: \
                        asm volatile(lock #op "b %b0, %1\n" : "+q"(__ret), "+m"(*(ptr)) : : "memory", "cc"); \
                        break; \
                case __X86_CASE_W: \
                        asm volatile(lock #op "w %w0, %1\n" : "+r"(__ret), "+m"(*(ptr)) : : "memory", "cc"); \
                        break; \
                case __X86_CASE_L: \
                        asm volatile(lock #op "l %0, %1\n" : "+r"(__ret), "+m"(*(ptr)) : : "memory", "cc"); \
                        break; \
                case __X86_CASE_Q: \
                        asm volatile(lock #op "q %q0, %1\n" : "+r"(__ret), "+m"(*(ptr)) : : "memory", "cc"); \
                        break; \
                } \
                __ret; \
        })

#define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc) __xadd((ptr), (inc), "lock ;")

/* Borrowed from linux kernel arch/x86/include/asm/cmpxchg.h */

@@ -53,55 +45,49 @@
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock) \
        ({ \
                __typeof__(*(ptr)) __ret; \
                __typeof__(*(ptr)) __old = (old); \
                __typeof__(*(ptr)) __new = (new); \
                switch (size) { \
                case __X86_CASE_B: { \
                        volatile uint8_t *__ptr = (volatile uint8_t *)(ptr); \
                        asm volatile(lock "cmpxchgb %2,%1" \
                                     : "=a"(__ret), "+m"(*__ptr) \
                                     : "q"(__new), "0"(__old) \
                                     : "memory"); \
                        break; \
                } \
                case __X86_CASE_W: { \
                        volatile uint16_t *__ptr = (volatile uint16_t *)(ptr); \
                        asm volatile(lock "cmpxchgw %2,%1" \
                                     : "=a"(__ret), "+m"(*__ptr) \
                                     : "r"(__new), "0"(__old) \
                                     : "memory"); \
                        break; \
                } \
                case __X86_CASE_L: { \
                        volatile uint32_t *__ptr = (volatile uint32_t *)(ptr); \
                        asm volatile(lock "cmpxchgl %2,%1" \
                                     : "=a"(__ret), "+m"(*__ptr) \
                                     : "r"(__new), "0"(__old) \
                                     : "memory"); \
                        break; \
                } \
                case __X86_CASE_Q: { \
                        volatile uint64_t *__ptr = (volatile uint64_t *)(ptr); \
                        asm volatile(lock "cmpxchgq %2,%1" \
                                     : "=a"(__ret), "+m"(*__ptr) \
                                     : "r"(__new), "0"(__old) \
                                     : "memory"); \
                        break; \
                } \
                } \
                __ret; \
        })

#define __cmpxchg(ptr, old, new, size) __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
#define cmpxchg(ptr, old, new) __cmpxchg(ptr, old, new, sizeof(*(ptr)))

#endif /* __CR_CMPXCHG_H__ */
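A minimal caller-side sketch (names invented): xadd() yields the value the location held before the addition, and cmpxchg() yields the old value, so equality with the expected value signals success.

#include <stdint.h>

static uint32_t seq;

static uint32_t next_seq(void)
{
        return xadd(&seq, 1);   /* atomic fetch-and-add, returns old value */
}

static int claim_if_zero(uint32_t val)
{
        /* cmpxchg() returns the old value; 0 means our store went in */
        return cmpxchg(&seq, 0, val) == 0;
}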
@@ -3,25 +3,24 @@

#ifdef __ASSEMBLY__

#define __ALIGN .align 4, 0x90
#define __ALIGN_STR ".align 4, 0x90"

#define GLOBAL(name) \
        .globl name; \
        name:

#define ENTRY(name) \
        .globl name; \
        .type name, @function; \
        __ALIGN; \
        name:

#define END(sym) .size sym, .- sym

#endif /* __ASSEMBLY__ */

#define __USER32_CS 0x23
#define __USER_CS 0x33

#endif /* __CR_LINKAGE_H__ */
@@ -2,18 +2,18 @@
#define __CR_ASM_PAGE_H__

#ifndef PAGE_SHIFT
#define PAGE_SHIFT 12
#endif

#ifndef PAGE_SIZE
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#endif

#ifndef PAGE_MASK
#define PAGE_MASK (~(PAGE_SIZE - 1))
#endif

#define PAGE_PFN(addr) ((addr) / PAGE_SIZE)
#define page_size() PAGE_SIZE

#endif /* __CR_ASM_PAGE_H__ */
@@ -10,23 +10,23 @@

#include "common/asm/bitsperlong.h"

#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG)

#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m"(*(volatile long *)(x))
#else
#define BITOP_ADDR(x) "+m"(*(volatile long *)(x))
#endif

#define ADDR BITOP_ADDR(addr)

static inline void set_bit(int nr, volatile unsigned long *addr)
{
        addr += nr / BITS_PER_LONG;
        *addr |= (1UL << (nr % BITS_PER_LONG));
}

@@ -60,17 +60,15 @@ static inline unsigned long __ffs(unsigned long word)
        return __builtin_ffsl(word) - 1;
}

#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)

/*
 * Find the next set bit in a memory region.
 */
static inline unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
        const unsigned long *p = addr + BITOP_WORD(offset);
        unsigned long result = offset & ~(BITS_PER_LONG - 1);
        unsigned long tmp;

        if (offset >= size)

@@ -87,7 +85,7 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
                size -= BITS_PER_LONG;
                result += BITS_PER_LONG;
        }
        while (size & ~(BITS_PER_LONG - 1)) {
                if ((tmp = *(p++)))
                        goto found_middle;
                result += BITS_PER_LONG;

@@ -99,15 +97,14 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,

found_first:
        tmp &= (~0UL >> (BITS_PER_LONG - size));
        if (tmp == 0UL)               /* Are any bits set? */
                return result + size; /* Nope. */
found_middle:
        return result + __ffs(tmp);
}

#define for_each_bit(i, bitmask) \
        for (i = find_next_bit(bitmask, sizeof(bitmask), 0); i < sizeof(bitmask); \
             i = find_next_bit(bitmask, sizeof(bitmask), i + 1))

#endif /* __CR_GENERIC_BITOPS_H__ */
@@ -6,9 +6,9 @@
#include <endian.h>

#if __BYTE_ORDER == __BIG_ENDIAN
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG - 1) & ~0x7)
#else
#define BITOP_LE_SWIZZLE 0
#endif

static inline int test_and_set_bit_le(int nr, void *addr)
@@ -9,34 +9,34 @@
#ifndef BUG_ON_HANDLER

#ifdef CR_NOGLIBC
#define __raise()
#else
#define __raise() raise(SIGABRT)
#endif

#ifndef __clang_analyzer__
#ifndef pr_err
#error pr_err macro must be defined
#endif
#define BUG_ON_HANDLER(condition) \
        do { \
                if ((condition)) { \
                        pr_err("BUG at %s:%d\n", __FILE__, __LINE__); \
                        __raise(); \
                        *(volatile unsigned long *)NULL = 0xdead0000 + __LINE__; \
                        __builtin_unreachable(); \
                } \
        } while (0)
#else
#define BUG_ON_HANDLER(condition) \
        do { \
                assert(!condition); \
        } while (0)
#endif

#endif /* BUG_ON_HANDLER */

#define BUG_ON(condition) BUG_ON_HANDLER((condition))
#define BUG() BUG_ON(true)

#endif /* __CR_BUG_H__ */
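Caller-side sketch; as the #error above enforces, a pr_err() must already be defined wherever BUG_ON() is used (the array and helper are invented):

static int stack[16];

static void push(int top, int v)
{
        BUG_ON(top < 0 || top >= 16);   /* invariant violation dies loudly */
        stack[top] = v;
}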
@@ -8,97 +8,113 @@
 */

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define NELEMS_AS_ARRAY(x, y) (sizeof(x) / sizeof((y)[0]))
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))

#define ASSIGN_TYPED(a, b) \
        do { \
                (a) = (typeof(a))(b); \
        } while (0)
#define ASSIGN_MEMBER(a, b, m) \
        do { \
                ASSIGN_TYPED((a)->m, (b)->m); \
        } while (0)

#define __stringify_1(x...) #x
#define __stringify(x...) __stringify_1(x)

#define NORETURN __attribute__((__noreturn__))
#define __packed __attribute__((__packed__))
#define __used __attribute__((__used__))
#define __maybe_unused __attribute__((unused))
#define __always_unused __attribute__((unused))
#define __must_check __attribute__((__warn_unused_result__))

#define __section(S) __attribute__((__section__(#S)))

#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif

#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

#ifndef always_inline
#define always_inline __always_inline
#endif

#ifndef noinline
#define noinline __attribute__((noinline))
#endif

#define __aligned(x) __attribute__((aligned(x)))

/*
 * Macro to define stack alignment.
 * aarch64 requires stack to be aligned to 16 bytes.
 */
#define __stack_aligned__ __attribute__((aligned(16)))

#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) & ((TYPE *)0)->MEMBER)
#endif

#define barrier() asm volatile("" ::: "memory")

#define container_of(ptr, type, member) \
        ({ \
                const typeof(((type *)0)->member) *__mptr = (ptr); \
                (type *)((char *)__mptr - offsetof(type, member)); \
        })
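container_of() recovers the enclosing structure from a pointer to one of its members. A sketch with an invented type (struct list_head is the one declared in the list header further down):

struct task_item {
        int pid;
        struct list_head node;  /* linked into some task list */
};

static inline struct task_item *task_from_node(struct list_head *n)
{
        return container_of(n, struct task_item, node);
}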
#ifndef FIELD_SIZEOF
#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))
#endif

#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y)) + 1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
#define ALIGN(x, a) (((x) + (a)-1) & ~((a)-1))

#define min(x, y) \
        ({ \
                typeof(x) _min1 = (x); \
                typeof(y) _min2 = (y); \
                (void)(&_min1 == &_min2); \
                _min1 < _min2 ? _min1 : _min2; \
        })

#define max(x, y) \
        ({ \
                typeof(x) _max1 = (x); \
                typeof(y) _max2 = (y); \
                (void)(&_max1 == &_max2); \
                _max1 > _max2 ? _max1 : _max2; \
        })

#define min_t(type, x, y) \
        ({ \
                type __min1 = (x); \
                type __min2 = (y); \
                __min1 < __min2 ? __min1 : __min2; \
        })

#define max_t(type, x, y) \
        ({ \
                type __max1 = (x); \
                type __max2 = (y); \
                __max1 > __max2 ? __max1 : __max2; \
        })

#define SWAP(x, y) \
        do { \
                typeof(x) ____val = x; \
                x = y; \
                y = ____val; \
        } while (0)

#define is_log2(v) (((v) & ((v)-1)) == 0)

/*
 * Use "__ignore_value" to avoid a warning when using a function declared with

@@ -121,9 +137,13 @@
 * (See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425 for the details)
 */
#if 3 < __GNUC__ + (4 <= __GNUC_MINOR__)
#define __ignore_value(x) \
        ({ \
                __typeof__(x) __x = (x); \
                (void)__x; \
        })
#else
#define __ignore_value(x) ((void)(x))
#endif

#endif /* __CR_COMPILER_H__ */
@@ -12,7 +12,7 @@
 *
 * Thus we may encode error number in low bits.
 */
#define MAX_ERRNO 4095

#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
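This mirrors the kernel convention: values in the top MAX_ERRNO range of the address space are treated as encoded negative errnos rather than valid pointers. A hedged caller sketch (the helper is invented):

static void *addr_or_null(unsigned long rv)
{
        if (IS_ERR_VALUE(rv))
                return NULL;    /* -(long)rv holds the errno */
        return (void *)rv;
}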
@@ -9,15 +9,18 @@
#include "common/compiler.h"

#define POISON_POINTER_DELTA 0
#define LIST_POISON1 ((void *)0x00100100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *)0x00200200 + POISON_POINTER_DELTA)

struct list_head {
        struct list_head *prev, *next;
};

#define LIST_HEAD_INIT(name) \
        { \
                &(name), &(name) \
        }
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{

@@ -25,14 +28,12 @@ static inline void INIT_LIST_HEAD(struct list_head *list)
        list->prev = list;
}

static inline void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
{
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
}

static inline void list_add(struct list_head *new, struct list_head *head)

@@ -45,7 +46,7 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head)
        __list_add(new, head->prev, head);
}

static inline void __list_del(struct list_head *prev, struct list_head *next)
{
        next->prev = prev;
        prev->next = next;

@@ -63,17 +64,15 @@ static inline void list_del(struct list_head *entry)
        entry->prev = LIST_POISON2;
}

static inline void list_replace(struct list_head *old, struct list_head *new)
{
        new->next = old->next;
        new->next->prev = new;
        new->prev = old->prev;
        new->prev->next = new;
}

static inline void list_replace_init(struct list_head *old, struct list_head *new)
{
        list_replace(old, new);
        INIT_LIST_HEAD(old);

@@ -91,21 +90,18 @@ static inline void list_move(struct list_head *list, struct list_head *head)
        list_add(list, head);
}

static inline void list_move_tail(struct list_head *list, struct list_head *head)
{
        __list_del_entry(list);
        list_add_tail(list, head);
}

static inline int list_is_last(const struct list_head *list, const struct list_head *head)
{
        return list->next == head;
}

static inline int list_is_first(const struct list_head *list, const struct list_head *head)
{
        return list->prev == head;
}

@@ -135,8 +131,7 @@ static inline int list_is_singular(const struct list_head *head)
        return !list_empty(head) && (head->next == head->prev);
}

static inline void __list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry)
{
        struct list_head *new_first = entry->next;
        list->next = head->next;

@@ -147,13 +142,11 @@ static inline void __list_cut_position(struct list_head *list,
        new_first->prev = head;
}

static inline void list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry)
{
        if (list_empty(head))
                return;
        if (list_is_singular(head) && (head->next != entry && head != entry))
                return;
        if (entry == head)
                INIT_LIST_HEAD(list);

@@ -161,36 +154,31 @@ static inline void list_cut_position(struct list_head *list,
                __list_cut_position(list, head, entry);
}

static inline void __list_splice(const struct list_head *list, struct list_head *prev, struct list_head *next)
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;

        first->prev = prev;
        prev->next = first;

        last->next = next;
        next->prev = last;
}

static inline void list_splice(const struct list_head *list, struct list_head *head)
{
        if (!list_empty(list))
                __list_splice(list, head, head->next);
}

static inline void list_splice_tail(struct list_head *list, struct list_head *head)
{
        if (!list_empty(list))
                __list_splice(list, head->prev, head);
}

static inline void list_splice_init(struct list_head *list, struct list_head *head)
{
        if (!list_empty(list)) {
                __list_splice(list, head, head->next);

@@ -198,8 +186,7 @@ static inline void list_splice_init(struct list_head *list,
        }
}

static inline void list_splice_tail_init(struct list_head *list, struct list_head *head)
{
        if (!list_empty(list)) {
                __list_splice(list, head->prev, head);

@@ -207,79 +194,60 @@ static inline void list_splice_tail_init(struct list_head *list,
        }
}

#define list_entry(ptr, type, member) container_of(ptr, type, member)

#define list_first_entry(ptr, type, member) list_entry((ptr)->next, type, member)

#define list_for_each(pos, head) for (pos = (head)->next; pos != (head); pos = pos->next)

#define list_for_each_prev(pos, head) for (pos = (head)->prev; pos != (head); pos = pos->prev)

#define list_for_each_safe(pos, n, head) for (pos = (head)->next, n = pos->next; pos != (head); pos = n, n = pos->next)

#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); pos = n, n = pos->prev)

#define list_for_each_entry(pos, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member); &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))

#define list_for_each_entry_reverse(pos, head, member) \
        for (pos = list_entry((head)->prev, typeof(*pos), member); &pos->member != (head); \
             pos = list_entry(pos->member.prev, typeof(*pos), member))

#define list_prepare_entry(pos, head, member) ((pos) ?: list_entry(head, typeof(*pos), member))

#define list_for_each_entry_continue(pos, head, member) \
        for (pos = list_entry(pos->member.next, typeof(*pos), member); &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))

#define list_for_each_entry_continue_reverse(pos, head, member) \
        for (pos = list_entry(pos->member.prev, typeof(*pos), member); &pos->member != (head); \
             pos = list_entry(pos->member.prev, typeof(*pos), member))

#define list_for_each_entry_from(pos, head, member) \
        for (; &pos->member != (head); pos = list_entry(pos->member.next, typeof(*pos), member))

#define list_for_each_entry_safe(pos, n, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member), \
             n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); pos = n, n = list_entry(n->member.next, typeof(*n), member))

#define list_for_each_entry_safe_continue(pos, n, head, member) \
        for (pos = list_entry(pos->member.next, typeof(*pos), member), \
             n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); pos = n, n = list_entry(n->member.next, typeof(*n), member))

#define list_for_each_entry_safe_from(pos, n, head, member) \
        for (n = list_entry(pos->member.next, typeof(*pos), member); &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

#define list_for_each_entry_safe_reverse(pos, n, head, member) \
        for (pos = list_entry((head)->prev, typeof(*pos), member), \
             n = list_entry(pos->member.prev, typeof(*pos), member); \
             &pos->member != (head); pos = n, n = list_entry(n->member.prev, typeof(*n), member))

#define list_safe_reset_next(pos, n, member) n = list_entry(pos->member.next, typeof(*pos), member)

/*
 * Double linked lists with a single pointer list head.

@@ -293,9 +261,12 @@ struct hlist_node {
        struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT \
        { \
                .first = NULL \
        }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)

static inline void INIT_HLIST_NODE(struct hlist_node *h)
{

@@ -348,8 +319,7 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n, struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;

@@ -357,15 +327,14 @@ static inline void hlist_add_before(struct hlist_node *n,
        *(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n, struct hlist_node *next)
{
        next->next = n->next;
        n->next = next;
        next->pprev = &n->next;

        if (next->next)
                next->next->pprev = &next->next;
}

/* after that we'll appear to be on some hlist and hlist_del will work */

@@ -378,8 +347,7 @@ static inline void hlist_add_fake(struct hlist_node *n)
 * Move a list from one list head to another. Fixup the pprev
 * reference of the first entry if it exists.
 */
static inline void hlist_move_list(struct hlist_head *old, struct hlist_head *new)
{
        new->first = old->first;
        if (new->first)

@@ -387,35 +355,35 @@ static inline void hlist_move_list(struct hlist_head *old,
        old->first = NULL;
}

#define hlist_entry(ptr, type, member) container_of(ptr, type, member)

#define hlist_for_each(pos, head) for (pos = (head)->first; pos; pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
        for (pos = (head)->first; pos && ({ \
                n = pos->next; \
                1; \
        }); \
             pos = n)

#define hlist_entry_safe(ptr, type, member) (ptr) ? hlist_entry(ptr, type, member) : NULL

#define hlist_for_each_entry(pos, head, member) \
        for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); pos; \
             pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

#define hlist_for_each_entry_continue(pos, member) \
        for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member); pos; \
             pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

#define hlist_for_each_entry_from(pos, member) \
        for (; pos; pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

#define hlist_for_each_entry_safe(pos, n, head, member) \
        for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); pos && ({ \
                n = pos->member.next; \
                1; \
        }); \
             pos = hlist_entry_safe(n, typeof(*pos), member))

#endif /* __CR_LIST_H__ */
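A minimal usage sketch for the intrusive list API above (types and names invented):

struct conn {
        int fd;
        struct list_head lnode;
};

static LIST_HEAD(conn_list);

static void conn_track(struct conn *c)
{
        list_add_tail(&c->lnode, &conn_list);
}

static void conn_flush(void)
{
        struct conn *c, *tmp;

        /* the _safe variant tolerates list_del() during iteration */
        list_for_each_entry_safe(c, tmp, &conn_list, lnode)
                list_del(&c->lnode);
}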
@@ -11,23 +11,24 @@

/* scan-build complains about dereferencing a NULL pointer here. */
#ifndef __clang_analyzer__
#define LOCK_BUG_ON(condition) \
        if ((condition)) \
        *(volatile unsigned long *)NULL = 0xdead0000 + __LINE__
#define LOCK_BUG() LOCK_BUG_ON(1)
#endif /* __clang_analyzer__ */

#ifdef CR_NOGLIBC
#include <compel/plugins/std/syscall.h>
#else
#include <unistd.h>
#include <sys/syscall.h>
static inline long sys_futex(uint32_t *addr1, int op, uint32_t val1, struct timespec *timeout, uint32_t *addr2,
                             uint32_t val3)
{
        int rc = syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
        if (rc == -1)
                rc = -errno;
        return rc;
}
#endif

@@ -35,8 +36,8 @@ typedef struct {
        atomic_t raw;
} __aligned(sizeof(int)) futex_t;

#define FUTEX_ABORT_FLAG (0x80000000)
#define FUTEX_ABORT_RAW (-1U)

/* Get current futex @f value */
static inline uint32_t futex_get(futex_t *f)

@@ -50,29 +51,27 @@ static inline void futex_set(futex_t *f, uint32_t v)
        atomic_set(&f->raw, (int)v);
}

#define futex_init(f) futex_set(f, 0)

/* Wait until futex @__f value @__v becomes in condition @__c */
#define futex_wait_if_cond(__f, __v, __cond) \
        do { \
                int ret; \
                uint32_t tmp; \
\
                while (1) { \
                        struct timespec to = { .tv_sec = 120 }; \
                        tmp = futex_get(__f); \
                        if ((tmp & FUTEX_ABORT_FLAG) || (tmp __cond(__v))) \
                                break; \
                        ret = sys_futex((uint32_t *)&(__f)->raw.counter, FUTEX_WAIT, tmp, &to, NULL, 0); \
                        if (ret == -ETIMEDOUT) \
                                continue; \
                        if (ret == -EINTR || ret == -EWOULDBLOCK) \
                                continue; \
                        if (ret < 0) \
                                LOCK_BUG(); \
                } \
        } while (0)

/* Set futex @f to @v and wake up all waiters */

@@ -110,10 +109,16 @@ static inline void futex_inc_and_wake(futex_t *f)
}

/* Plain increment futex @f value */
static inline void futex_inc(futex_t *f)
{
        atomic_inc(&f->raw);
}

/* Plain decrement futex @f value */
static inline void futex_dec(futex_t *f)
{
        atomic_dec(&f->raw);
}

/* Wait until futex @f value becomes @v */
#define futex_wait_until(f, v) futex_wait_if_cond(f, v, ==)

@@ -137,7 +142,7 @@ static inline void futex_wait_while(futex_t *f, uint32_t v)
}

typedef struct {
        atomic_t raw;
} mutex_t;

static inline void mutex_init(mutex_t *m)
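The helpers compose into a simple cross-task rendezvous. A hedged sketch (futex_inc_and_wake() and futex_wait_until() are the helpers referenced above; the rest is invented, and futex_init(&stage) is assumed to run during setup):

static futex_t stage;

static void announce_ready(void)
{
        futex_inc_and_wake(&stage);     /* bump the value, wake all waiters */
}

static void wait_ready(void)
{
        futex_wait_until(&stage, 1);    /* sleep until the value becomes 1 */
}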
@@ -14,25 +14,22 @@
 * the pressure on kernel memory manager and use predefined
 * known to work well size of the message buffer.
 */
#define CR_SCM_MSG_SIZE (1024)
#define CR_SCM_MAX_FD (252)

struct scm_fdset {
        struct msghdr hdr;
        struct iovec iov;
        char msg_buf[CR_SCM_MSG_SIZE];
};

#ifndef F_GETOWNER_UIDS
#define F_GETOWNER_UIDS 17
#endif

extern int send_fds(int sock, struct sockaddr_un *saddr, int len, int *fds, int nr_fds, void *data, unsigned ch_size);
extern int __recv_fds(int sock, int *fds, int nr_fds, void *data, unsigned ch_size, int flags);
static inline int recv_fds(int sock, int *fds, int nr_fds, void *data, unsigned ch_size)
{
        return __recv_fds(sock, fds, nr_fds, data, ch_size, 0);
}
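send_fds()/recv_fds() wrap SCM_RIGHTS descriptor transfers. A rough sketch over an already-connected AF_UNIX pair (error handling trimmed; passing a NULL address for a connected socket is an assumption, not something this header spells out):

static int pass_fd(int sk_out, int sk_in, int fd)
{
        int copy = -1;

        if (send_fds(sk_out, NULL, 0, &fd, 1, NULL, 0) < 0)
                return -1;
        if (recv_fds(sk_in, &copy, 1, NULL, 0) < 0)
                return -1;
        return copy;    /* a new descriptor referring to the same file */
}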
@@ -8,43 +8,42 @@
#error "Macro pr_err is needed."
#endif

#define __xalloc(op, size, ...) \
        ({ \
                void *___p = op(__VA_ARGS__); \
                if (!___p) \
                        pr_err("%s: Can't allocate %li bytes\n", __func__, (long)(size)); \
                ___p; \
        })

#define xstrdup(str) __xalloc(strdup, strlen(str) + 1, str)
#define xmalloc(size) __xalloc(malloc, size, size)
#define xzalloc(size) __xalloc(calloc, size, 1, size)
#define xrealloc(p, size) __xalloc(realloc, size, p, size)

#define xfree(p) free(p)

#define xrealloc_safe(pptr, size) \
        ({ \
                int __ret = -1; \
                void *new = xrealloc(*pptr, size); \
                if (new) { \
                        *pptr = new; \
                        __ret = 0; \
                } \
                __ret; \
        })

#define xmemdup(ptr, size) \
        ({ \
                void *new = xmalloc(size); \
                if (new) \
                        memcpy(new, ptr, size); \
                new; \
        })

#define memzero_p(p) memset(p, 0, sizeof(*p))
#define memzero(p, size) memset(p, 0, size)

/*
 * Helper for allocating trees with single xmalloc.

@@ -64,6 +63,6 @@ static inline void *xptr_pull_s(void **m, size_t s)
        return ret;
}

#define xptr_pull(m, type) xptr_pull_s(m, sizeof(type))

#endif /* __CR_XMALLOC_H__ */
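A typical caller pattern (a sketch with invented types; pr_err must be defined before the header is included, per the #error check above). The x-wrappers log allocation failures but still return NULL, so callers check as usual:

#include <stddef.h>

struct buf {
        char *data;
        size_t len;
};

static struct buf *buf_new(size_t len)
{
        struct buf *b = xzalloc(sizeof(*b));    /* zeroed; logs on failure */

        if (!b)
                return NULL;
        b->data = xmalloc(len);
        if (!b->data) {
                xfree(b);
                return NULL;
        }
        b->len = len;
        return b;
}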