s390:compel/arch/s390: Add architecture support to compel tool and libraries

This patch only adds the support but does not enable it for building.

Reviewed-by: Alice Frosi <alice@linux.vnet.ibm.com>
Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Reviewed-by: Dmitry Safonov <dsafonov@virtuozzo.com>
Signed-off-by: Andrei Vagin <avagin@virtuozzo.com>
Author:    Michael Holzheu
Date:      2017-06-30 20:31:36 +02:00
Committer: Pavel Emelyanov
Parent:    49d8cd0977
Commit:    7ce8f56be2

25 changed files with 1516 additions and 0 deletions

@@ -0,0 +1,67 @@
#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__
#include "common/arch/s390/asm/atomic_ops.h"
#include "common/compiler.h"
#define ATOMIC_INIT(i) { (i) }
typedef struct {
int counter;
} atomic_t;
static inline int atomic_read(const atomic_t *v)
{
int c;
asm volatile(
" l %0,%1\n"
: "=d" (c) : "Q" (v->counter));
return c;
}
static inline void atomic_set(atomic_t *v, int i)
{
asm volatile(
" st %1,%0\n"
: "=Q" (v->counter) : "d" (i));
}
static inline int atomic_add_return(int i, atomic_t *v)
{
return __atomic_add_barrier(i, &v->counter) + i;
}
static inline void atomic_add(int i, atomic_t *v)
{
__atomic_add(i, &v->counter);
}
#define atomic_inc(_v) atomic_add(1, _v)
#define atomic_inc_return(_v) atomic_add_return(1, _v)
#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v)
#define atomic_dec(_v) atomic_sub(1, _v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
#define ATOMIC_OPS(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
__atomic_##op(i, &v->counter); \
}
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
#undef ATOMIC_OPS
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
return __atomic_cmpxchg(&v->counter, old, new);
}
#endif /* __ARCH_S390_ATOMIC__ */
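
The header above only builds on s390, since it relies on s390 inline assembly. As a point of reference (not part of the patch), a minimal usage sketch of the atomic_t API it defines might look like the following; the refcount variable and atomic_demo() function are purely illustrative, and the header is assumed to be on the include path.

/* Illustrative only: exercises the atomic_t API defined above. */
static atomic_t refcount = ATOMIC_INIT(1);

static void atomic_demo(void)
{
	atomic_inc(&refcount);				/* 1 -> 2 */

	/* Swap 2 for 5 only if the counter still holds 2; returns the old value. */
	int old = atomic_cmpxchg(&refcount, 2, 5);
	if (old == 2 && atomic_read(&refcount) == 5)
		atomic_sub(3, &refcount);		/* 5 -> 2 */

	if (atomic_dec_and_test(&refcount))		/* 2 -> 1, so false */
		atomic_set(&refcount, 0);		/* would run only on the last put */
}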

@@ -0,0 +1,74 @@
#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__
#define __ATOMIC_OP(op_name, op_string) \
static inline int op_name(int val, int *ptr) \
{ \
int old, new; \
\
asm volatile( \
"0: lr %[new],%[old]\n" \
op_string " %[new],%[val]\n" \
" cs %[old],%[new],%[ptr]\n" \
" jl 0b" \
: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
: [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
return old; \
}
#define __ATOMIC_OPS(op_name, op_string) \
__ATOMIC_OP(op_name, op_string) \
__ATOMIC_OP(op_name##_barrier, op_string)
__ATOMIC_OPS(__atomic_add, "ar")
__ATOMIC_OPS(__atomic_and, "nr")
__ATOMIC_OPS(__atomic_or, "or")
__ATOMIC_OPS(__atomic_xor, "xr")
#undef __ATOMIC_OPS
#define __ATOMIC64_OP(op_name, op_string) \
static inline long op_name(long val, long *ptr) \
{ \
long old, new; \
\
asm volatile( \
"0: lgr %[new],%[old]\n" \
op_string " %[new],%[val]\n" \
" csg %[old],%[new],%[ptr]\n" \
" jl 0b" \
: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
: [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
return old; \
}
#define __ATOMIC64_OPS(op_name, op_string) \
__ATOMIC64_OP(op_name, op_string) \
__ATOMIC64_OP(op_name##_barrier, op_string)
__ATOMIC64_OPS(__atomic64_add, "agr")
__ATOMIC64_OPS(__atomic64_and, "ngr")
__ATOMIC64_OPS(__atomic64_or, "ogr")
__ATOMIC64_OPS(__atomic64_xor, "xgr")
#undef __ATOMIC64_OPS
static inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
asm volatile(
" cs %[old],%[new],%[ptr]"
: [old] "+d" (old), [ptr] "+Q" (*ptr)
: [new] "d" (new) : "cc", "memory");
return old;
}
static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
asm volatile(
" csg %[old],%[new],%[ptr]"
: [old] "+d" (old), [ptr] "+Q" (*ptr)
: [new] "d" (new) : "cc", "memory");
return old;
}
#endif /* __ARCH_S390_ATOMIC_OPS__ */
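
All of the ops above follow the same compare-and-swap retry pattern: load the old value, compute the new one, and let cs/csg store it only if the memory location still holds the old value, retrying otherwise; the function returns the value the location held before the update. Purely as an illustration (not part of the patch), the behaviour of the generated __atomic_add() corresponds to the portable sketch below, written with a GCC builtin instead of the s390 cs instruction; the _sketch name is hypothetical.

/* Behavioural sketch only -- the real __atomic_add() above uses the s390 "cs" instruction. */
static inline int __atomic_add_sketch(int val, int *ptr)
{
	int old;

	do {
		old = *ptr;	/* snapshot the current value */
		/* retry if another CPU changed *ptr between the snapshot and the swap */
	} while (!__sync_bool_compare_and_swap(ptr, old, old + val));

	return old;		/* value before the addition, matching __atomic_add() */
}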

@@ -0,0 +1,164 @@
#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H
#include "common/asm/bitsperlong.h"
#include "common/compiler.h"
#include "common/arch/s390/asm/atomic_ops.h"
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG)
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
static inline unsigned long *
__bitops_word(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
return (unsigned long *)addr;
}
static inline unsigned char *
__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
{
return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
}
static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
__atomic64_or((long) mask, (long *) addr);
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long mask;
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
__atomic64_and((long) mask, (long *) addr);
}
static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
__atomic64_xor((long) mask, (long *) addr);
}
static inline int
test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
old = __atomic64_or_barrier((long) mask, (long *) addr);
return (old & mask) != 0;
}
static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
const volatile unsigned char *addr;
addr = ((const volatile unsigned char *)ptr);
addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
return (*addr >> (nr & 7)) & 1;
}
static inline unsigned char __flogr(unsigned long word)
{
if (__builtin_constant_p(word)) {
unsigned long bit = 0;
if (!word)
return 64;
if (!(word & 0xffffffff00000000UL)) {
word <<= 32;
bit += 32;
}
if (!(word & 0xffff000000000000UL)) {
word <<= 16;
bit += 16;
}
if (!(word & 0xff00000000000000UL)) {
word <<= 8;
bit += 8;
}
if (!(word & 0xf000000000000000UL)) {
word <<= 4;
bit += 4;
}
if (!(word & 0xc000000000000000UL)) {
word <<= 2;
bit += 2;
}
if (!(word & 0x8000000000000000UL)) {
word <<= 1;
bit += 1;
}
return bit;
} else {
register unsigned long bit asm("4") = word;
register unsigned long out asm("5");
asm volatile(
" flogr %[bit],%[bit]\n"
: [bit] "+d" (bit), [out] "=d" (out) : : "cc");
return bit;
}
}
static inline unsigned long __ffs(unsigned long word)
{
return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
}
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
static inline unsigned long _find_next_bit(const unsigned long *addr,
unsigned long nbits, unsigned long start,
unsigned long invert)
{
unsigned long tmp;
if (!nbits || start >= nbits)
return nbits;
tmp = addr[start / BITS_PER_LONG] ^ invert;
tmp &= BITMAP_FIRST_WORD_MASK(start);
start = round_down(start, BITS_PER_LONG);
while (!tmp) {
start += BITS_PER_LONG;
if (start >= nbits)
return nbits;
tmp = addr[start / BITS_PER_LONG] ^ invert;
}
return min(start + __ffs(tmp), nbits);
}
static inline unsigned long find_next_bit(const unsigned long *addr,
unsigned long size,
unsigned long offset)
{
return _find_next_bit(addr, size, offset, 0UL);
}
#define for_each_bit(i, bitmask) \
for (i = find_next_bit(bitmask, sizeof(bitmask), 0); \
i < sizeof(bitmask); \
i = find_next_bit(bitmask, sizeof(bitmask), i + 1))
#endif /* _S390_BITOPS_H */
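
For orientation (not part of the patch), the bitmap helpers above can be exercised as in the sketch below; it only builds on s390 because set_bit() and friends use the __atomic64_* ops, and note that find_next_bit() takes the size in bits. The map variable and bitmap_demo() function are illustrative only.

/* Illustrative only: a 128-bit bitmap driven through the helpers above. */
static void bitmap_demo(void)
{
	DECLARE_BITMAP(map, 128) = { 0 };	/* two 64-bit words, all bits clear */
	unsigned long bit;

	set_bit(3, map);
	set_bit(70, map);

	if (test_and_set_bit(3, map))		/* bit 3 already set -> returns 1 */
		clear_bit(3, map);

	/* Walk the remaining set bits; the second argument is the size in bits. */
	for (bit = find_next_bit(map, 128, 0); bit < 128;
	     bit = find_next_bit(map, 128, bit + 1))
		; /* only bit 70 is still set here */
}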

@@ -0,0 +1,6 @@
#ifndef __CR_BITSPERLONG_H__
#define __CR_BITSPERLONG_H__
#define BITS_PER_LONG 64
#endif /* __CR_BITSPERLONG_H__ */

@@ -0,0 +1,22 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
#ifdef __ASSEMBLY__
#define __ALIGN .align 4, 0x07
#define GLOBAL(name) \
.globl name; \
name:
#define ENTRY(name) \
.globl name; \
.type name, @function; \
__ALIGN; \
name:
#define END(name) \
.size name, . - name
#endif /* __ASSEMBLY__ */
#endif

@@ -0,0 +1,19 @@
#ifndef __CR_ASM_PAGE_H__
#define __CR_ASM_PAGE_H__
#ifndef PAGE_SHIFT
#define PAGE_SHIFT 12
#endif
#ifndef PAGE_SIZE
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#endif
#ifndef PAGE_MASK
#define PAGE_MASK (~(PAGE_SIZE - 1))
#endif
#define PAGE_PFN(addr) ((addr) / PAGE_SIZE)
#define page_size() PAGE_SIZE
#endif /* __CR_ASM_PAGE_H__ */
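
With PAGE_SHIFT fixed at 12, the helpers above are plain shift-and-mask arithmetic on 4 KiB pages. A small worked example, assuming the macros above are in scope (values and the page_demo() name are illustrative only):

/* Worked example only: 4 KiB pages, PAGE_SHIFT == 12. */
static void page_demo(void)
{
	unsigned long addr = 0x12345678UL;

	unsigned long page_start = addr & PAGE_MASK;	/* 0x12345000: page-aligned base  */
	unsigned long offset     = addr & ~PAGE_MASK;	/* 0x00000678: offset within page */
	unsigned long pfn        = PAGE_PFN(addr);	/* 0x00012345: addr / PAGE_SIZE   */

	(void)page_start; (void)offset; (void)pfn;
}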