TODO: use CK? for env.c

Branch: main
Author: phani (5 years ago)
Parent: 845468baa6
Commit: 37db945637
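The title's TODO floats Concurrency Kit as the eventual replacement for these hand-rolled wrappers. Purely as a sketch of that direction (none of this is part of the commit, and the ck_pr.h calls below are assumptions about CK's API rather than code from this repository), the CAS and fetch-add wrappers might map onto CK primitives like this:

#include <ck_pr.h> /* assumption: Concurrency Kit available; not a dependency of this commit */

/* Sketch only: same wrapper signatures as env.c, bodies swapped to ck_pr_* calls. */
i32
env_a_cas(i32 p_off, i32 t, i32 s)
{
	int *p   = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));
	int  old = t;
	ck_pr_cas_int_value(p, t, s, &old); /* old receives the prior value, matching musl's a_cas */
	return old;
}

i32
env_a_fetch_add(i32 x_off, i32 v)
{
	int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
	return ck_pr_faa_int(x, v); /* atomic fetch-and-add, returns the previous value */
}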

@@ -16,7 +16,7 @@ CFILES += ${RTDIR}/arch/${ARCH}/*.c
 #CFLAGS += -DDEBUG
 CFLAGS += -D_GNU_SOURCE
 #CFLAGS += -DLOG_TO_FILE
-CFLAGS += -DUSE_HTTP_UVIO #-DUSE_HTTP_SYNC
+#CFLAGS += -DUSE_HTTP_UVIO #-DUSE_HTTP_SYNC
 #CFLAGS += -DUSE_SYSCALL
 #CFLAGS += -DPREEMPT_DISABLE
 CACHE_LINESIZE := $(shell getconf LEVEL1_DCACHE_LINESIZE)

@@ -10,10 +10,4 @@ __getcycles(void)
 	return virtual_timer_value;
 }
-
-INLINE unsigned long long
-env_getcycles(void)
-{
-	return __getcycles();
-}
 #endif

@@ -2,158 +2,6 @@
 #include <runtime.h>
-
-// Atomic functions, with definitions stolen from musl
-
-i32
-env_a_ctz_64(u64 x)
-{
-	__asm__("bsf %1,%0" : "=r"(x) : "r"(x));
-	return x;
-}
-
-// static inline int a_ctz_l(unsigned long x)
-//{
-//	__asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
-//	return x;
-//}
-
-INLINE void
-env_a_and_64(i32 p_off, u64 v)
-{
-	uint64_t *p = worker_thread_get_memory_ptr_void(p_off, sizeof(uint64_t));
-	*p &= v;
-	// __asm__( "lock ; and %1, %0"
-	//          : "=m"(*p) : "r"(v) : "memory" );
-}
-
-INLINE void
-env_a_or_64(i32 p_off, i64 v)
-{
-	assert(sizeof(i64) == sizeof(uint64_t));
-	uint64_t *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i64));
-	*p |= v;
-	// __asm__( "lock ; or %1, %0"
-	//          : "=m"(*p) : "r"(v) : "memory" );
-}
-
-// static inline void a_or_l(volatile void *p, long v)
-//{
-//	__asm__( "lock ; or %1, %0"
-//	         : "=m"(*(long *)p) : "r"(v) : "memory" );
-//}
-//
-// static inline void *a_cas_p(volatile void *p, void *t, void *s)
-//{
-//	__asm__( "lock ; cmpxchg %3, %1"
-//	         : "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
-//	return t;
-//}
-//
-i32
-env_a_cas(i32 p_off, i32 t, i32 s)
-{
-	assert(sizeof(i32) == sizeof(volatile int));
-	volatile int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));
-	__asm__("lock ; cmpxchg %3, %1" : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory");
-	return t;
-}
-
-void
-env_a_or(i32 p_off, i32 v)
-{
-	assert(sizeof(i32) == sizeof(volatile int));
-	volatile int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));
-	__asm__("lock ; or %1, %0" : "=m"(*p) : "r"(v) : "memory");
-}
-
-// static inline void a_and(volatile int *p, int v)
-//{
-//	__asm__( "lock ; and %1, %0"
-//	         : "=m"(*p) : "r"(v) : "memory" );
-//}
-
-i32
-env_a_swap(i32 x_off, i32 v)
-{
-	assert(sizeof(i32) == sizeof(volatile int));
-	volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
-	__asm__("xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory");
-	return v;
-}
-
-i32
-env_a_fetch_add(i32 x_off, i32 v)
-{
-	assert(sizeof(i32) == sizeof(volatile int));
-	volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
-	__asm__("lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory");
-	return v;
-}
-
-void
-env_a_inc(i32 x_off)
-{
-	assert(sizeof(i32) == sizeof(volatile int));
-	volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
-	__asm__("lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory");
-}
-
-void
-env_a_dec(i32 x_off)
-{
-	assert(sizeof(i32) == sizeof(volatile int));
-	volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
-	__asm__("lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory");
-}
-
-void
-env_a_store(i32 p_off, i32 x)
-{
-	assert(sizeof(i32) == sizeof(volatile int));
-	volatile int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));
-	__asm__ __volatile__("mov %1, %0 ; lock ; orl $0,(%%esp)" : "=m"(*p) : "r"(x) : "memory");
-}
-
-void
-env_do_spin(i32 i)
-{
-	__asm__ __volatile__("pause" : : : "memory");
-}
-
-void
-env_do_crash(i32 i)
-{
-	printf("crashing: %d\n", i);
-	assert(0);
-}
-
-int
-env_a_ctz_32(i32 x)
-{
-	__asm__("bsf %1,%0" : "=r"(x) : "r"(x));
-	return x;
-}
-
-void
-env_do_barrier(i32 x)
-{
-	__asm__ __volatile__("" : : : "memory");
-}
-
 unsigned long long int
 __getcycles(void)
 {
@@ -166,10 +14,4 @@ __getcycles(void)
 	return cpu_time_in_cycles;
 }
-
-INLINE unsigned long long
-env_getcycles(void)
-{
-	return __getcycles();
-}
 #endif

@@ -17,13 +17,129 @@ env___syscall(i32 n, i32 a, i32 b, i32 c, i32 d, i32 e, i32 f)
 	return env_syscall_handler(n, a, b, c, d, e, f);
 }
 
 void
 env___unmapself(u32 base, u32 size)
 {
 	// Just do some no op
 }
+
+i32
+env_a_ctz_64(u64 x)
+{
+	return __builtin_ctzll(x);
+}
+
+INLINE void
+env_a_and_64(i32 p_off, u64 v)
+{
+	uint64_t *p = worker_thread_get_memory_ptr_void(p_off, sizeof(uint64_t));
+	*p &= v;
+}
+
+INLINE void
+env_a_or_64(i32 p_off, i64 v)
+{
+	assert(sizeof(i64) == sizeof(uint64_t));
+	uint64_t *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i64));
+	*p |= v;
+}
+
+i32
+env_a_cas(i32 p_off, i32 t, i32 s)
+{
+	assert(sizeof(i32) == sizeof(volatile int));
+	volatile int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));
+	return __sync_val_compare_and_swap(p, t, s);
+}
+
+void
+env_a_or(i32 p_off, i32 v)
+{
+	assert(sizeof(i32) == sizeof(volatile int));
+	volatile int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));
+	//__asm__("lock ; or %1, %0" : "=m"(*p) : "r"(v) : "memory");
+	*p |= v;
+}
+
+i32
+env_a_swap(i32 x_off, i32 v)
+{
+	assert(sizeof(i32) == sizeof(volatile int));
+	volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
+	//__asm__("xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory");
+	int t = *x;
+	*x = v;
+	v  = t;
+	return v;
+}
+
+i32
+env_a_fetch_add(i32 x_off, i32 v)
+{
+	assert(sizeof(i32) == sizeof(volatile int));
+	volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
+	//__asm__("lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory");
+	return __sync_fetch_and_add(x, v);
+}
+
+void
+env_a_inc(i32 x_off)
+{
+	assert(sizeof(i32) == sizeof(volatile int));
+	volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
+	(*x)++;
+	//__asm__("lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory");
+}
+
+void
+env_a_dec(i32 x_off)
+{
+	assert(sizeof(i32) == sizeof(volatile int));
+	volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
+	(*x)--;
+	//__asm__("lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory");
+}
+
+void
+env_a_store(i32 p_off, i32 x)
+{
+	assert(sizeof(i32) == sizeof(volatile int));
+	volatile int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));
+	*p = x;
+	//__asm__ __volatile__("mov %1, %0 ; lock ; orl $0,(%%esp)" : "=m"(*p) : "r"(x) : "memory");
+}
+
+int
+env_a_ctz_32(i32 x)
+{
+	return __builtin_ctz(x);
+}
+
+void
+env_do_spin(i32 i)
+{
+	printf("nope! not happening: %d\n", i);
+	assert(0);
+}
+
+void
+env_do_crash(i32 i)
+{
+	printf("crashing: %d\n", i);
+	assert(0);
+}
+
+void
+env_do_barrier(i32 x)
+{
+	__sync_synchronize();
+}
+
 // Floating point routines
 INLINE double
@@ -38,3 +154,9 @@ env_cos(double d)
 	return cos(d);
 }
+
+INLINE unsigned long long
+env_getcycles(void)
+{
+	return __getcycles();
+}
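Note that several of the new portable bodies in env.c (env_a_and_64, env_a_or_64, env_a_or, env_a_swap, env_a_inc, env_a_dec, env_a_store) are plain loads and stores, so they give up the atomicity that the removed lock-prefixed x86 asm provided; among the read-modify-write wrappers, only env_a_cas and env_a_fetch_add stay atomic via __sync builtins. If that matters before the CK switch, the same GCC builtin family covers the rest. A minimal sketch, not part of this commit, assuming the same worker_thread_get_memory_ptr_void helper (the 64-bit cases would use __atomic_fetch_and / __atomic_fetch_or the same way):

/* Sketch only: atomic variants via GCC __atomic builtins, keeping the
 * sequentially consistent ordering the removed lock-prefixed asm implied. */
i32
env_a_swap(i32 x_off, i32 v)
{
	volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
	return __atomic_exchange_n(x, v, __ATOMIC_SEQ_CST); /* returns the previous value */
}

void
env_a_inc(i32 x_off)
{
	volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
	__atomic_fetch_add(x, 1, __ATOMIC_SEQ_CST);
}

void
env_a_store(i32 p_off, i32 x)
{
	volatile int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));
	__atomic_store_n(p, x, __ATOMIC_SEQ_CST); /* store plus full ordering, like the old mov + lock orl */
}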

@@ -20,7 +20,7 @@ RT_DIR=${BASE_DIR}/runtime/
 RT_MEM=${RT_DIR}/memory/
 RT_LIBC=${RT_DIR}/libc/libc_backing.c
 RT_RT=${RT_DIR}/runtime.c
-RT_ENV=${RT_DIR}/libc/env_${ARCH}.c
+RT_ENV=${RT_DIR}/libc/env.c ${RT_DIR}/libc/env_${ARCH}.c
 MEMC=${RT_MEM}/${MEMC_64}
 DUMMY=${BASE_DIR}/code_benches/dummy.c

@@ -1 +1 @@
-Subproject commit 54d098796036ee625688ec0f2796c07f5aecc89c
+Subproject commit e270b5edb24cf3a7d9cdc162eb4d6945cdabf1b5