include: add common header files for riscv64

Co-authored-by: Yixue Zhao <felicitia2010@gmail.com>
Co-authored-by: stove <stove@rivosinc.com>
Signed-off-by: Haorong Lu <ancientmodern4@gmail.com>
---
- rebased
- imported a page_size() type fix (authored by Cryolitia PukNgae)
Signed-off-by: PukNgae Cryolitia <Cryolitia@gmail.com>
Signed-off-by: Alexander Mikhalitsyn <aleksandr.mikhalitsyn@canonical.com>
commit d8f93e7bac (parent c49eb18f9f)
Haorong Lu, 2023-08-01 11:49:50 -07:00, committed by Andrei Vagin
5 changed files with 232 additions and 0 deletions

@@ -0,0 +1,109 @@
#ifndef __CR_ATOMIC_H__
#define __CR_ATOMIC_H__
typedef struct {
int counter;
} atomic_t;
/* Copied from the Linux header arch/riscv/include/asm/barrier.h */
#define nop() __asm__ __volatile__("nop")
#define RISCV_FENCE(p, s) __asm__ __volatile__("fence " #p "," #s : : : "memory")
/* These barriers need to enforce ordering on both devices or memory. */
#define mb() RISCV_FENCE(iorw, iorw)
#define rmb() RISCV_FENCE(ir, ir)
#define wmb() RISCV_FENCE(ow, ow)
/* These barriers do not need to enforce ordering on devices, just memory. */
#define __smp_mb() RISCV_FENCE(rw, rw)
#define __smp_rmb() RISCV_FENCE(r, r)
#define __smp_wmb() RISCV_FENCE(w, w)
#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
RISCV_FENCE(rw, w); \
WRITE_ONCE(*p, v); \
} while (0)
#define __smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
RISCV_FENCE(r, rw); \
___p1; \
})
/* Copied from the Linux kernel header arch/riscv/include/asm/atomic.h */
static inline int atomic_read(const atomic_t *v)
{
return (*(volatile int *)&(v)->counter);
}
static inline void atomic_set(atomic_t *v, int i)
{
v->counter = i;
}
#define atomic_get atomic_read
static inline int atomic_add_return(int i, atomic_t *v)
{
int result;
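/* amoadd.w.aqrl atomically adds i to v->counter and leaves the old value in result */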
asm volatile("amoadd.w.aqrl %1, %2, %0" : "+A"(v->counter), "=r"(result) : "r"(i) : "memory");
__smp_mb();
return result + i;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
return atomic_add_return(-i, v);
}
static inline int atomic_inc(atomic_t *v)
{
return atomic_add_return(1, v) - 1;
}
static inline int atomic_add(int val, atomic_t *v)
{
return atomic_add_return(val, v) - val;
}
static inline int atomic_dec(atomic_t *v)
{
return atomic_sub_return(1, v) + 1;
}
/* true if the result is 0, or false for all other cases. */
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_dec_return(v) (atomic_sub_return(1, v))
#define atomic_inc_return(v) (atomic_add_return(1, v))
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
unsigned long tmp;
int oldval;
__smp_mb();
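/* LR/SC retry loop: load-reserve the counter, branch out to 2: if it does not
 * equal old, otherwise store-conditionally write new and retry if the
 * reservation was lost. */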
asm volatile("1:\n"
" lr.w %1, %2\n"
" bne %1, %3, 2f\n"
" sc.w %0, %4, %2\n"
" bnez %0, 1b\n"
"2:"
: "=&r"(tmp), "=&r"(oldval), "+A"(ptr->counter)
: "r"(old), "r"(new)
: "memory");
__smp_mb();
return oldval;
}
#endif /* __CR_ATOMIC_H__ */
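
Taken together, the fences and the amoadd/LR-SC helpers above cover the two usual patterns: publishing data behind a flag and maintaining shared counters. The sketch below is purely illustrative and not part of this patch; every name in it (demo_shared_t, demo_publish, demo_init_once, ...) is invented, and it assumes only the definitions in this header:

typedef struct {
	int payload;
	int ready;		/* set to 1 once payload is valid */
	atomic_t init_state;	/* 0 = init not yet claimed, 1 = claimed */
} demo_shared_t;

/* Writer: store the data, then order the flag store after it with __smp_wmb(). */
static inline void demo_publish(demo_shared_t *s, int value)
{
	s->payload = value;
	__smp_wmb();
	s->ready = 1;
}

/* Reader: check the flag, then order the data load after it with __smp_rmb(). */
static inline int demo_consume(demo_shared_t *s, int *out)
{
	if (!s->ready)
		return 0;
	__smp_rmb();
	*out = s->payload;
	return 1;
}

/* One-shot initialization: atomic_cmpxchg() returns the old value, so exactly
 * one caller observes 0 and gets to run init_fn(). */
static inline int demo_init_once(demo_shared_t *s, void (*init_fn)(void))
{
	if (atomic_cmpxchg(&s->init_state, 0, 1) == 0) {
		init_fn();
		return 1;
	}
	return 0;
}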

@@ -0,0 +1,50 @@
#ifndef __CR_ASM_BITOPS_H__
#define __CR_ASM_BITOPS_H__
#include "common/compiler.h"
#include "common/asm-generic/bitops.h"
#define BITS_PER_LONG 64
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define __AMO(op) "amo" #op ".d"
#define __test_and_op_bit_ord(op, mod, nr, addr, ord) \
({ \
unsigned long __res, __mask; \
__mask = BIT_MASK(nr); \
__asm__ __volatile__(__AMO(op) #ord " %0, %2, %1" \
: "=r"(__res), "+A"(addr[BIT_WORD(nr)]) \
: "r"(mod(__mask)) \
: "memory"); \
((__res & __mask) != 0); \
})
#define __op_bit_ord(op, mod, nr, addr, ord) \
__asm__ __volatile__(__AMO(op) #ord " zero, %1, %0" \
: "+A"(addr[BIT_WORD(nr)]) \
: "r"(mod(BIT_MASK(nr))) \
: "memory");
#define __test_and_op_bit(op, mod, nr, addr) __test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
#define __op_bit(op, mod, nr, addr) __op_bit_ord(op, mod, nr, addr, )
/* Bitmask modifiers */
#define __NOP(x) (x)
#define __NOT(x) (~(x))
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation may be reordered on architectures other than x86.
*/
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(or, __NOP, nr, addr);
}
#endif /* __CR_ASM_BITOPS_H__ */
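
As a usage sketch for the single helper above: test_and_set_bit() returns the bit's previous value, which makes it a natural "claim" primitive over a bitmap. The bitmap size and names below are invented for the example and are not part of this patch:

#define DEMO_NR_SLOTS 128
static unsigned long demo_slot_map[DEMO_NR_SLOTS / BITS_PER_LONG];

/* Returns 1 if the caller claimed slot nr, 0 if it was already taken. */
static inline int demo_claim_slot(int nr)
{
	return test_and_set_bit(nr, demo_slot_map) == 0;
}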

@@ -0,0 +1,6 @@
#ifndef __CR_BITSPERLONG_H__
#define __CR_BITSPERLONG_H__
#define BITS_PER_LONG 64
#endif /* __CR_BITSPERLONG_H__ */

@@ -0,0 +1,23 @@
#ifndef __CR_LINKAGE_H__
#define __CR_LINKAGE_H__
#ifdef __ASSEMBLY__
#define __ALIGN .align 4, 0x00
#define __ALIGN_STR ".align 4, 0x00"
#define GLOBAL(name) \
.globl name; \
name:
#define ENTRY(name) \
.globl name; \
.type name, @function; \
__ALIGN; \
name:
#define END(sym) .size sym, .- sym
#endif /* __ASSEMBLY__ */
#endif /* __CR_LINKAGE_H__ */

@@ -0,0 +1,44 @@
#ifndef __CR_ASM_PAGE_H__
#define __CR_ASM_PAGE_H__
#define ARCH_HAS_LONG_PAGES
#ifndef CR_NOGLIBC
#include <string.h> /* ffsl() */
#include <unistd.h> /* _SC_PAGESIZE */
extern unsigned __page_size;
extern unsigned __page_shift;
static inline unsigned page_size(void)
{
if (!__page_size)
__page_size = sysconf(_SC_PAGESIZE);
return __page_size;
}
static inline unsigned page_shift(void)
{
if (!__page_shift)
__page_shift = (ffsl(page_size()) - 1);
return __page_shift;
}
/*
 * Don't add ifdefs for PAGE_SIZE: if any header defines it as a constant
 * on riscv64, we need to refrain from using PAGE_SIZE in criu and use
 * page_size() across the sources instead (as it may differ on riscv64).
 */
#define PAGE_SIZE page_size()
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_SHIFT page_shift()
#define PAGE_PFN(addr) ((addr) / PAGE_SIZE)
#else /* CR_NOGLIBC */
extern unsigned long page_size(void);
#define PAGE_SIZE page_size()
#endif /* CR_NOGLIBC */
#endif /* __CR_ASM_PAGE_H__ */
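
Because the page size is only known at run time here (ARCH_HAS_LONG_PAGES), callers are expected to go through page_size()/PAGE_SIZE rather than a compile-time constant. A small sketch of the typical arithmetic follows; the helper names are invented, the code is not part of this patch, and it assumes the glibc path above:

/* Number of pages needed to cover len bytes, rounding up to whole pages. */
static inline unsigned long demo_pages_needed(unsigned long len)
{
	return (len + PAGE_SIZE - 1) / PAGE_SIZE;
}

/* True if addr sits on a page boundary for the runtime page size. */
static inline int demo_page_aligned(unsigned long addr)
{
	return (addr & (PAGE_SIZE - 1)) == 0;
}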