diff --git a/runtime/Makefile b/runtime/Makefile
index f8cf33e..859ea76 100644
--- a/runtime/Makefile
+++ b/runtime/Makefile
@@ -16,7 +16,7 @@ CFILES += ${RTDIR}/arch/${ARCH}/*.c
 #CFLAGS += -DDEBUG
 CFLAGS += -D_GNU_SOURCE
 #CFLAGS += -DLOG_TO_FILE
-CFLAGS += -DUSE_HTTP_UVIO #-DUSE_HTTP_SYNC
+#CFLAGS += -DUSE_HTTP_UVIO #-DUSE_HTTP_SYNC
 #CFLAGS += -DUSE_SYSCALL
 #CFLAGS += -DPREEMPT_DISABLE
 CACHE_LINESIZE := $(shell getconf LEVEL1_DCACHE_LINESIZE)
diff --git a/runtime/src/arch/aarch64/env.c b/runtime/src/arch/aarch64/env.c
index 334288d..3949e82 100644
--- a/runtime/src/arch/aarch64/env.c
+++ b/runtime/src/arch/aarch64/env.c
@@ -10,10 +10,4 @@ __getcycles(void)
         return virtual_timer_value;
 }
 
-INLINE unsigned long long
-env_getcycles(void)
-{
-        return __getcycles();
-}
-
 #endif
diff --git a/runtime/src/arch/x86_64/env.c b/runtime/src/arch/x86_64/env.c
index fec02f7..3ec3dcd 100644
--- a/runtime/src/arch/x86_64/env.c
+++ b/runtime/src/arch/x86_64/env.c
@@ -2,158 +2,6 @@
 
 #include
 
-// Atomic functions, with definitions stolen from musl
-
-i32
-env_a_ctz_64(u64 x)
-{
-        __asm__("bsf %1,%0" : "=r"(x) : "r"(x));
-
-        return x;
-}
-
-// static inline int a_ctz_l(unsigned long x)
-//{
-//        __asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
-//        return x;
-//}
-
-INLINE void
-env_a_and_64(i32 p_off, u64 v)
-{
-        uint64_t *p = worker_thread_get_memory_ptr_void(p_off, sizeof(uint64_t));
-        *p &= v;
-
-        // __asm__( "lock ; and %1, %0"
-        //          : "=m"(*p) : "r"(v) : "memory" );
-}
-
-INLINE void
-env_a_or_64(i32 p_off, i64 v)
-{
-        assert(sizeof(i64) == sizeof(uint64_t));
-        uint64_t *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i64));
-        *p |= v;
-
-        // __asm__( "lock ; or %1, %0"
-        //          : "=m"(*p) : "r"(v) : "memory" );
-}
-
-// static inline void a_or_l(volatile void *p, long v)
-//{
-//        __asm__( "lock ; or %1, %0"
-//                : "=m"(*(long *)p) : "r"(v) : "memory" );
-//}
-//
-// static inline void *a_cas_p(volatile void *p, void *t, void *s)
-//{
-//        __asm__( "lock ; cmpxchg %3, %1"
-//                : "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
-//        return t;
-//}
-//
-i32
-env_a_cas(i32 p_off, i32 t, i32 s)
-{
-        assert(sizeof(i32) == sizeof(volatile int));
-        volatile int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));
-
-        __asm__("lock ; cmpxchg %3, %1" : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory");
-
-        return t;
-}
-
-void
-env_a_or(i32 p_off, i32 v)
-{
-        assert(sizeof(i32) == sizeof(volatile int));
-        volatile int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));
-        __asm__("lock ; or %1, %0" : "=m"(*p) : "r"(v) : "memory");
-}
-
-// static inline void a_and(volatile int *p, int v)
-//{
-//        __asm__( "lock ; and %1, %0"
-//                : "=m"(*p) : "r"(v) : "memory" );
-//}
-
-
-i32
-env_a_swap(i32 x_off, i32 v)
-{
-        assert(sizeof(i32) == sizeof(volatile int));
-        volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
-
-        __asm__("xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory");
-
-        return v;
-}
-
-i32
-env_a_fetch_add(i32 x_off, i32 v)
-{
-        assert(sizeof(i32) == sizeof(volatile int));
-        volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
-
-        __asm__("lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory");
-
-        return v;
-}
-
-void
-env_a_inc(i32 x_off)
-{
-        assert(sizeof(i32) == sizeof(volatile int));
-        volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
-
-        __asm__("lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory");
-}
-
-void
-env_a_dec(i32 x_off)
-{
-        assert(sizeof(i32) == sizeof(volatile int));
-        volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
-
-        __asm__("lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory");
-}
-
-
-void
-env_a_store(i32 p_off, i32 x)
-{
-        assert(sizeof(i32) == sizeof(volatile int));
-        volatile int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));
-        __asm__ __volatile__("mov %1, %0 ; lock ; orl $0,(%%esp)" : "=m"(*p) : "r"(x) : "memory");
-}
-
-void
-env_do_spin(i32 i)
-{
-        __asm__ __volatile__("pause" : : : "memory");
-}
-
-void
-env_do_crash(i32 i)
-{
-        printf("crashing: %d\n", i);
-        assert(0);
-}
-
-int
-env_a_ctz_32(i32 x)
-{
-        __asm__("bsf %1,%0" : "=r"(x) : "r"(x));
-
-        return x;
-}
-
-void
-env_do_barrier(i32 x)
-{
-        __asm__ __volatile__("" : : : "memory");
-}
-
 unsigned long long int
 __getcycles(void)
 {
@@ -166,10 +14,4 @@ __getcycles(void)
         return cpu_time_in_cycles;
 }
 
-INLINE unsigned long long
-env_getcycles(void)
-{
-        return __getcycles();
-}
-
 #endif
diff --git a/runtime/src/env.c b/runtime/src/env.c
index 128b9e6..93c0a6e 100644
--- a/runtime/src/env.c
+++ b/runtime/src/env.c
@@ -17,13 +17,129 @@ env___syscall(i32 n, i32 a, i32 b, i32 c, i32 d, i32 e, i32 f)
         return env_syscall_handler(n, a, b, c, d, e, f);
 }
 
-
 void
 env___unmapself(u32 base, u32 size)
 {
         // Just do some no op
 }
+i32
+env_a_ctz_64(u64 x)
+{
+        return __builtin_ctzll(x);
+}
+
+INLINE void
+env_a_and_64(i32 p_off, u64 v)
+{
+        uint64_t *p = worker_thread_get_memory_ptr_void(p_off, sizeof(uint64_t));
+        *p &= v;
+}
+
+INLINE void
+env_a_or_64(i32 p_off, i64 v)
+{
+        assert(sizeof(i64) == sizeof(uint64_t));
+        uint64_t *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i64));
+        *p |= v;
+}
+
+i32
+env_a_cas(i32 p_off, i32 t, i32 s)
+{
+        assert(sizeof(i32) == sizeof(volatile int));
+        volatile int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));
+
+        return __sync_val_compare_and_swap(p, t, s);
+}
+
+void
+env_a_or(i32 p_off, i32 v)
+{
+        assert(sizeof(i32) == sizeof(volatile int));
+        volatile int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));
+        //__asm__("lock ; or %1, %0" : "=m"(*p) : "r"(v) : "memory");
+        *p |= v;
+}
+
+i32
+env_a_swap(i32 x_off, i32 v)
+{
+        assert(sizeof(i32) == sizeof(volatile int));
+        volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
+
+        //__asm__("xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory");
+        int t = *x;
+        *x = v;
+        v = t;
+
+        return v;
+}
+
+i32
+env_a_fetch_add(i32 x_off, i32 v)
+{
+        assert(sizeof(i32) == sizeof(volatile int));
+        volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
+
+        //__asm__("lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory");
+        return __sync_fetch_and_add(x, v);
+}
+
+void
+env_a_inc(i32 x_off)
+{
+        assert(sizeof(i32) == sizeof(volatile int));
+        volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
+        (*x)++;
+
+        //__asm__("lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory");
+}
+
+void
+env_a_dec(i32 x_off)
+{
+        assert(sizeof(i32) == sizeof(volatile int));
+        volatile int *x = worker_thread_get_memory_ptr_void(x_off, sizeof(i32));
+        (*x)--;
+
+        //__asm__("lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory");
+}
+
+void
+env_a_store(i32 p_off, i32 x)
+{
+        assert(sizeof(i32) == sizeof(volatile int));
+        volatile int *p = worker_thread_get_memory_ptr_void(p_off, sizeof(i32));
+        *p = x;
+        //__asm__ __volatile__("mov %1, %0 ; lock ; orl $0,(%%esp)" : "=m"(*p) : "r"(x) : "memory");
+}
+
+int
+env_a_ctz_32(i32 x)
+{
+        return __builtin_ctz(x);
+}
+
+void
+env_do_spin(i32 i)
+{
+        printf("nope! not happening: %d\n", i);
+        assert(0);
+}
+
+void
+env_do_crash(i32 i)
+{
+        printf("crashing: %d\n", i);
+        assert(0);
+}
+
+void
+env_do_barrier(i32 x)
+{
+        __sync_synchronize();
+}
 // Floating point routines
 INLINE double
 env_sin(double d)
 {
@@ -38,3 +154,9 @@ env_cos(double d)
         return cos(d);
 }
 
+INLINE unsigned long long
+env_getcycles(void)
+{
+        return __getcycles();
+}
+
diff --git a/runtime/tests/Makefile.inc b/runtime/tests/Makefile.inc
index 4c9e451..6a15ce2 100644
--- a/runtime/tests/Makefile.inc
+++ b/runtime/tests/Makefile.inc
@@ -20,7 +20,7 @@ RT_DIR=${BASE_DIR}/runtime/
 RT_MEM=${RT_DIR}/memory/
 RT_LIBC=${RT_DIR}/libc/libc_backing.c
 RT_RT=${RT_DIR}/runtime.c
-RT_ENV=${RT_DIR}/libc/env_${ARCH}.c
+RT_ENV=${RT_DIR}/libc/env.c ${RT_DIR}/libc/env_${ARCH}.c
 MEMC=${RT_MEM}/${MEMC_64}
 DUMMY=${BASE_DIR}/code_benches/dummy.c
 
diff --git a/silverfish b/silverfish
index 54d0987..e270b5e 160000
--- a/silverfish
+++ b/silverfish
@@ -1 +1 @@
-Subproject commit 54d098796036ee625688ec0f2796c07f5aecc89c
+Subproject commit e270b5edb24cf3a7d9cdc162eb4d6945cdabf1b5
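Context on the pattern this patch adopts: the x86-specific inline assembly (bsf, lock ; cmpxchg, lock ; xadd, and the clobber-only barrier) is replaced with compiler builtins that GCC and Clang lower to equivalent instructions on any supported target, which is what lets the per-arch env.c files shrink down to __getcycles. The program below is not part of the patch; it is a minimal standalone sketch (variable names like word and counter are illustrative) exercising the same builtins the new runtime/src/env.c relies on.

#include <assert.h>
#include <stdio.h>

int
main(void)
{
        /* __builtin_ctz / __builtin_ctzll: count trailing zeros, standing in for
         * the removed hand-written "bsf" assembly (undefined for an input of 0). */
        assert(__builtin_ctz(0x10u) == 4);
        assert(__builtin_ctzll(0x100000000ull) == 32);

        /* __sync_val_compare_and_swap: atomic compare-and-swap, standing in for
         * "lock ; cmpxchg"; it returns the value the location held beforehand. */
        volatile int word = 5;
        int          old  = __sync_val_compare_and_swap(&word, 5, 9);
        assert(old == 5 && word == 9);

        /* __sync_fetch_and_add: atomic fetch-and-add, standing in for "lock ; xadd". */
        volatile int counter = 0;
        int          prev    = __sync_fetch_and_add(&counter, 3);
        assert(prev == 0 && counter == 3);

        /* __sync_synchronize: full memory barrier, standing in for the empty
         * clobber-only asm previously used by env_do_barrier. */
        __sync_synchronize();

        printf("builtin-based atomics behaved as expected\n");
        return 0;
}

Note that several of the rewritten helpers (env_a_or, env_a_swap, env_a_inc, env_a_dec, env_a_store) use plain, non-atomic read-modify-write on the sandbox memory rather than a __sync_* builtin, presumably relying on each sandbox's linear memory being touched by a single worker thread at a time; the sketch above shows the fully atomic variants of those operations.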