* changes to be fully tested! * cas etc needs to be implemented for aarch64sledge_graph
parent
bcc0aa2063
commit
7ff05b9d73
@ -1,20 +0,0 @@
|
||||
#ifndef SFRT_UTIL_H
#define SFRT_UTIL_H

/**
 * Read the CPU timestamp counter via the x86 `rdtsc` instruction.
 * `rdtsc` places the low 32 bits of the counter in EAX and the high
 * 32 bits in EDX; the two halves are stitched back together here.
 * @return CPU time in cycles
 **/
static unsigned long long int
util__rdtsc(void)
{
	unsigned int lo;
	unsigned int hi;

	__asm__ volatile("rdtsc" : "=a"(lo), "=d"(hi));

	return ((unsigned long long int)hi << 32) | lo;
}

#endif /* SFRT_UTIL_H */
|
@ -0,0 +1,19 @@
|
||||
#if defined(AARCH64) || defined(aarch64)

#include <runtime.h>

/**
 * Read the ARMv8 virtual counter register (CNTVCT_EL0).
 * NOTE(review): this is the generic-timer tick count rather than raw
 * CPU cycles — confirm the timer frequency is acceptable wherever the
 * result is interpreted as "cycles".
 * @return current value of the virtual counter
 **/
unsigned long long int
__getcycles(void)
{
	unsigned long long cntvct;

	asm volatile("mrs %0, cntvct_el0" : "=r"(cntvct));

	return cntvct;
}

/**
 * Sandbox-facing wrapper around __getcycles.
 * @return current value of the virtual counter
 **/
INLINE unsigned long long
env_getcycles(void)
{
	return __getcycles();
}

#endif
|
@ -0,0 +1,175 @@
|
||||
#if defined(X86_64) || defined(x86_64)
|
||||
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#include <runtime.h>
|
||||
// Atomic functions, with definitions stolen from musl
|
||||
|
||||
i32
|
||||
env_a_ctz_64(u64 x)
|
||||
{
|
||||
__asm__("bsf %1,%0" : "=r"(x) : "r"(x));
|
||||
|
||||
return x;
|
||||
}
|
||||
|
||||
// static inline int a_ctz_l(unsigned long x)
|
||||
//{
|
||||
// __asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
|
||||
// return x;
|
||||
//}
|
||||
|
||||
INLINE void
|
||||
env_a_and_64(i32 p_off, u64 v)
|
||||
{
|
||||
uint64_t *p = worker_thread_get_memory_ptr_void(p_off, sizeof(uint64_t));
|
||||
*p &= v;
|
||||
|
||||
// __asm__( "lock ; and %1, %0"
|
||||
// : "=m"(*p) : "r"(v) : "memory" );
|
||||
}
|
||||
|
||||
INLINE void
|
||||
env_a_or_64(i32 p_off, i64 v)
|
||||
{
|
||||
assert(sizeof(i64) == sizeof(uint64_t));
|
||||
uint64_t *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i64));
|
||||
*p |= v;
|
||||
|
||||
// __asm__( "lock ; or %1, %0"
|
||||
// : "=m"(*p) : "r"(v) : "memory" );
|
||||
}
|
||||
|
||||
// static inline void a_or_l(volatile void *p, long v)
|
||||
//{
|
||||
// __asm__( "lock ; or %1, %0"
|
||||
// : "=m"(*(long *)p) : "r"(v) : "memory" );
|
||||
//}
|
||||
//
|
||||
// static inline void *a_cas_p(volatile void *p, void *t, void *s)
|
||||
//{
|
||||
// __asm__( "lock ; cmpxchg %3, %1"
|
||||
// : "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
|
||||
// return t;
|
||||
//}
|
||||
//
|
||||
/**
 * Atomic compare-and-swap on a 32-bit word in sandbox linear memory
 * (musl's a_cas): if *p == t, store s; in all cases return the value
 * that was in *p before the operation.
 * @param p_off offset of the int within sandbox memory
 * @param t expected old value
 * @param s new value to store on match
 * @return the previous value of *p (== t iff the swap happened)
 **/
i32
env_a_cas(i32 p_off, i32 t, i32 s)
{
	assert(sizeof(i32) == sizeof(volatile int));
	volatile int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));

	/* lock cmpxchg compares EAX (t) with *p; on match it stores s,
	 * otherwise it loads *p into EAX — either way t ends up holding
	 * the old value of *p. */
	__asm__("lock ; cmpxchg %3, %1" : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory");

	return t;
}
|
||||
|
||||
/**
 * Atomically OR bits into a 32-bit word in sandbox linear memory
 * (musl's a_or).
 * @param p_off offset of the int within sandbox memory
 * @param v bits to OR into *p
 **/
void
env_a_or(i32 p_off, i32 v)
{
	assert(sizeof(i32) == sizeof(volatile int));
	volatile int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));
	__asm__("lock ; or %1, %0" : "=m"(*p) : "r"(v) : "memory");
}
|
||||
|
||||
// static inline void a_and(volatile int *p, int v)
|
||||
//{
|
||||
// __asm__( "lock ; and %1, %0"
|
||||
// : "=m"(*p) : "r"(v) : "memory" );
|
||||
//}
|
||||
|
||||
|
||||
/**
 * Atomically exchange a 32-bit word in sandbox linear memory with v
 * (musl's a_swap). `xchg` with a memory operand is implicitly locked,
 * so no `lock` prefix is needed.
 * @param x_off offset of the int within sandbox memory
 * @param v value to store
 * @return the previous value of *x
 **/
i32
env_a_swap(i32 x_off, i32 v)
{
	assert(sizeof(i32) == sizeof(volatile int));
	volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));

	__asm__("xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory");

	return v;
}
|
||||
|
||||
/**
 * Atomically add v to a 32-bit word in sandbox linear memory and
 * return the pre-add value (musl's a_fetch_add), via `lock xadd`.
 * @param x_off offset of the int within sandbox memory
 * @param v addend
 * @return the value of *x before the addition
 **/
i32
env_a_fetch_add(i32 x_off, i32 v)
{
	assert(sizeof(i32) == sizeof(volatile int));
	volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));

	__asm__("lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory");

	return v;
}
|
||||
|
||||
/**
 * Atomically increment a 32-bit word in sandbox linear memory
 * (musl's a_inc).
 * @param x_off offset of the int within sandbox memory
 **/
void
env_a_inc(i32 x_off)
{
	assert(sizeof(i32) == sizeof(volatile int));
	volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));

	__asm__("lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory");
}
|
||||
|
||||
/**
 * Atomically decrement a 32-bit word in sandbox linear memory
 * (musl's a_dec).
 * @param x_off offset of the int within sandbox memory
 **/
void
env_a_dec(i32 x_off)
{
	assert(sizeof(i32) == sizeof(volatile int));
	volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));

	__asm__("lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory");
}
|
||||
|
||||
|
||||
void
|
||||
env_a_store(i32 p_off, i32 x)
|
||||
{
|
||||
assert(sizeof(i32) == sizeof(volatile int));
|
||||
volatile int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));
|
||||
__asm__ __volatile__("mov %1, %0 ; lock ; orl $0,(%%esp)" : "=m"(*p) : "r"(x) : "memory");
|
||||
}
|
||||
|
||||
/**
 * Spin-wait hint: executes the x86 `pause` instruction to reduce
 * power use and pipeline contention inside busy-wait loops.
 * @param i unused (kept to match the env_* call signature)
 **/
void
env_do_spin(i32 i)
{
	__asm__ __volatile__("pause" : : : "memory");
}
|
||||
|
||||
void
|
||||
env_do_crash(i32 i)
|
||||
{
|
||||
printf("crashing: %d\n", i);
|
||||
assert(0);
|
||||
}
|
||||
|
||||
/**
 * Count trailing zeroes of a 32-bit value via the x86 `bsf`
 * instruction. As with env_a_ctz_64, `bsf` leaves the destination
 * undefined when the source is zero — callers must pass a non-zero
 * value.
 * @param x value to scan; must be non-zero
 * @return index of the lowest set bit
 **/
int
env_a_ctz_32(i32 x)
{
	__asm__("bsf %1,%0" : "=r"(x) : "r"(x));

	return x;
}
|
||||
|
||||
/**
 * Compiler-level memory barrier: the empty asm with a "memory"
 * clobber prevents the compiler from reordering memory accesses
 * across this point (no CPU fence instruction is emitted).
 * @param x unused (kept to match the env_* call signature)
 **/
void
env_do_barrier(i32 x)
{
	__asm__ __volatile__("" : : : "memory");
}
|
||||
|
||||
/**
 * Read the CPU timestamp counter via the x86 `rdtsc` instruction.
 * `rdtsc` returns the low half of the counter in EAX and the high
 * half in EDX; the two are recombined into one 64-bit value.
 * @return CPU time in cycles
 **/
unsigned long long int
__getcycles(void)
{
	unsigned int lo;
	unsigned int hi;

	__asm__ volatile("rdtsc" : "=a"(lo), "=d"(hi));

	return ((unsigned long long int)hi << 32) | lo;
}
|
||||
|
||||
/**
 * Sandbox-facing wrapper around __getcycles.
 * @return CPU time in cycles
 **/
INLINE unsigned long long
env_getcycles(void)
{
	return __getcycles();
}
|
||||
|
||||
#endif
|
@ -1 +1 @@
|
||||
Subproject commit 507ef4b2a14f45ccb246a202b485d175116e0b80
|
||||
Subproject commit a44fe4566e1cca6fba95852e077311e5d3e21226
|
Loading…
Reference in new issue