Merge pull request #308 from gwsystems/valgrind-nits

Valgrind nits
Branch: master
Sean McBride, 3 years ago, committed by GitHub
commit 02187fefc5

@@ -40,7 +40,7 @@ current_sandbox_send_response()
         sandbox->total_time = end_time - sandbox->timestamp_of.request_arrival;

         /* Send HTTP Response Header and Body */
-        rc = http_header_200_write(sandbox->client_socket_descriptor, module_content_type, response_body_size);
+        rc = http_header_200_write(sandbox->client_socket_descriptor, content_type, response_body_size);
         if (rc < 0) goto err;
         rc = client_socket_send(sandbox->client_socket_descriptor, (const char *)response->buffer, response_body_size,

@ -21,8 +21,8 @@ perf_window_initialize(struct perf_window *perf_window)
LOCK_INIT(&perf_window->lock); LOCK_INIT(&perf_window->lock);
perf_window->count = 0; perf_window->count = 0;
memset(&perf_window->by_duration, 0, sizeof(struct execution_node) * PERF_WINDOW_BUFFER_SIZE); memset(perf_window->by_duration, 0, sizeof(struct execution_node) * PERF_WINDOW_BUFFER_SIZE);
memset(&perf_window->by_termination, 0, sizeof(uint16_t) * PERF_WINDOW_BUFFER_SIZE); memset(perf_window->by_termination, 0, sizeof(uint16_t) * PERF_WINDOW_BUFFER_SIZE);
} }
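
Note on this hunk: assuming by_duration and by_termination are fixed-size arrays embedded in struct perf_window (which the element-count sizing suggests), &perf_window->by_duration and perf_window->by_duration evaluate to the same address, so the change makes the pointer argument read consistently with the sizeof(element) * count sizing rather than altering behavior. A minimal sketch with placeholder field types, not the project's definitions:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define PERF_WINDOW_BUFFER_SIZE 16

    /* Hypothetical stand-in for struct perf_window; only the array members matter here. */
    struct perf_window_sketch {
            uint64_t by_duration[PERF_WINDOW_BUFFER_SIZE];
            uint16_t by_termination[PERF_WINDOW_BUFFER_SIZE];
    };

    int
    main(void)
    {
            struct perf_window_sketch w;

            /* An array member decays to a pointer to its first element; a pointer to
             * the whole array has the same address, so both memset forms zero the
             * same bytes. */
            assert((void *)w.by_duration == (void *)&w.by_duration);
            memset(w.by_duration, 0, sizeof(uint64_t) * PERF_WINDOW_BUFFER_SIZE);
            memset(w.by_termination, 0, sizeof(uint16_t) * PERF_WINDOW_BUFFER_SIZE);
            return 0;
    }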

@@ -273,9 +273,8 @@ priority_queue_initialize(size_t capacity, bool use_lock, priority_queue_get_pri
         /* Add one to capacity because this data structure ignores the element at 0 */
         size_t one_based_capacity = capacity + 1;

-        struct priority_queue *priority_queue = (struct priority_queue *)calloc(sizeof(struct priority_queue)
-                                                                                   + sizeof(void *) * one_based_capacity,
-                                                                                 1);
+        struct priority_queue *priority_queue = (struct priority_queue *)
+                calloc(1, sizeof(struct priority_queue) + sizeof(void *) * one_based_capacity);

         /* We're assuming a min-heap implementation, so set to larget possible value */
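
The reflow here also puts calloc's arguments into the documented (nmemb, size) order: one object whose size is the queue header plus the one-based item array. A sketch of the same allocation pattern, using hypothetical names rather than the project's struct:

    #include <stdlib.h>

    /* Hypothetical heap that stores its item array inline after the header. */
    struct pq_sketch {
            size_t capacity;
            void  *items[]; /* flexible array member */
    };

    static struct pq_sketch *
    pq_alloc(size_t one_based_capacity)
    {
            /* calloc(1, total): a single object of (header + capacity pointers)
             * bytes, zero-initialized, with the element count and element size
             * in the conventional positions. */
            return calloc(1, sizeof(struct pq_sketch) + sizeof(void *) * one_based_capacity);
    }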

@@ -18,8 +18,7 @@ static inline void
 sandbox_state_history_init(struct sandbox_state_history *sandbox_state_history)
 {
 #ifdef LOG_STATE_CHANGES
-        memset(sandbox_state_history, 0,
-               sizeof(struct sandbox_state_history) + SANDBOX_STATE_HISTORY_CAPACITY * sizeof(sandbox_state_t));
+        memset(sandbox_state_history, 0, sizeof(struct sandbox_state_history));
 #endif
 }
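
This hunk changes how many bytes are written: if the state buffer is a fixed-size array embedded in struct sandbox_state_history, then sizeof(struct sandbox_state_history) already covers it, and adding SANDBOX_STATE_HISTORY_CAPACITY * sizeof(sandbox_state_t) on top zeroes past the end of the object, the kind of overrun Valgrind reports. A hedged sketch with placeholder field names and widths:

    #include <stdint.h>
    #include <string.h>

    #define SANDBOX_STATE_HISTORY_CAPACITY 16
    typedef uint8_t sandbox_state_t; /* placeholder width for the real state type */

    /* Assumed layout: the history buffer lives inside the struct. */
    struct sandbox_state_history_sketch {
            uint16_t        size;
            sandbox_state_t buffer[SANDBOX_STATE_HISTORY_CAPACITY];
    };

    static void
    history_init(struct sandbox_state_history_sketch *h)
    {
            /* sizeof(*h) is the whole object, buffer included; the old sizing
             * would have written CAPACITY extra bytes beyond it. */
            memset(h, 0, sizeof(*h));
    }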

@@ -4,6 +4,12 @@
 #include <stdio.h>
 #include <threads.h>

+#define PAGE_SIZE (unsigned long)(1 << 12)
+#define CACHE_LINE 64
+/* This might be Intel specific. ARM and x64 both have the same CACHE_LINE size, but x64 uses Intel uses a double
+ * cache-line as a coherency unit */
+#define CACHE_PAD (CACHE_LINE * 2)
+
 /* For this family of macros, do NOT pass zero as the pow2 */
 #define round_to_pow2(x, pow2) (((unsigned long)(x)) & (~((pow2)-1)))
 #define round_up_to_pow2(x, pow2) (round_to_pow2(((unsigned long)(x)) + (pow2)-1, (pow2)))
@@ -15,13 +21,8 @@
 #define IMPORT __attribute__((visibility("default")))
 #define INLINE __attribute__((always_inline))
 #define PAGE_ALIGNED __attribute__((aligned(PAGE_SIZE)))
-#define PAGE_SIZE (unsigned long)(1 << 12)
 #define WEAK __attribute__((weak))
-#define CACHE_LINE 64
-/* This might be Intel specific. ARM and x64 both have the same CACHE_LINE size, but x64 uses Intel uses a double
- * cache-line as a coherency unit */
-#define CACHE_PAD (CACHE_LINE * 2)

 #ifndef unlikely
 #define unlikely(x) __builtin_expect(!!(x), 0)
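
Since macros expand at their point of use, PAGE_ALIGNED compiled fine even with PAGE_SIZE defined a few lines below it; the move simply groups the size constants ahead of the attribute macros that mention them, so the header reads top-down. Roughly, with illustrative names rather than the project's:

    /* Size constants first ... */
    #define PAGE_SIZE_SKETCH  (unsigned long)(1 << 12)
    #define CACHE_LINE_SKETCH 64
    #define CACHE_PAD_SKETCH  (CACHE_LINE_SKETCH * 2)

    /* ... then the attribute macros that reference them. */
    #define PAGE_ALIGNED_SKETCH __attribute__((aligned(PAGE_SIZE_SKETCH)))

    /* Example use: a page-aligned scratch buffer. */
    static char scratch[PAGE_SIZE_SKETCH] PAGE_ALIGNED_SKETCH;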

@@ -44,7 +44,7 @@
          */ \
         static inline struct vec_##TYPE *vec_##TYPE##_alloc(size_t capacity) \
         { \
-                struct vec_##TYPE *vec = (struct vec_##TYPE *)malloc(sizeof(struct vec_##TYPE)); \
+                struct vec_##TYPE *vec = (struct vec_##TYPE *)calloc(1, sizeof(struct vec_##TYPE)); \
                 if (vec == NULL) return vec; \
                 \
                 int rc = vec_##TYPE##_init(vec, capacity); \
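
Switching the header allocation from malloc to calloc gives the vec struct a defined all-zero state before vec_##TYPE##_init runs, which is exactly what Valgrind's uninitialised-value checks flag with malloc. A small sketch with a made-up element type and field names:

    #include <stdlib.h>

    /* Hypothetical vector header resembling what the vec_##TYPE macro generates. */
    struct vec_int_sketch {
            size_t length;
            size_t capacity;
            int   *buffer;
    };

    static struct vec_int_sketch *
    vec_int_sketch_alloc(void)
    {
            /* calloc(1, size) returns zeroed memory: length and capacity start at 0,
             * and buffer reads as NULL on platforms where the null pointer is
             * all-bits-zero; malloc would leave all three indeterminate. */
            return calloc(1, sizeof(struct vec_int_sketch));
    }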

@@ -3,6 +3,7 @@
 #include <stdint.h>
 #include <stdlib.h>
 #include <sys/mman.h>
+#include <string.h>

 #include "sandbox_types.h"
 #include "types.h"
@@ -55,16 +56,18 @@ wasm_stack_init(struct wasm_stack *wasm_stack, uint64_t capacity)
                 goto err_stack_allocation_failed;
         }

-        wasm_stack->low = (uint8_t *)mmap(wasm_stack->buffer + /* guard page */ PAGE_SIZE, capacity,
-                                          PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
-        if (unlikely(wasm_stack->low == MAP_FAILED)) {
+        wasm_stack->low = wasm_stack->buffer + /* guard page */ PAGE_SIZE;
+        wasm_stack->capacity = capacity;
+        wasm_stack->high = wasm_stack->low + capacity;
+
+        /* Set the initial bytes to read / write */
+        rc = mprotect(wasm_stack->low, capacity, PROT_READ | PROT_WRITE);
+        if (unlikely(rc != 0)) {
                 perror("sandbox set stack read/write");
                 goto err_stack_prot_failed;
         }

         ps_list_init_d(wasm_stack);
-        wasm_stack->capacity = capacity;
-        wasm_stack->high = wasm_stack->low + capacity;

         rc = 0;
 done:
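
Rather than issuing a second MAP_FIXED mmap over the region past the guard page, the new code keeps the original mapping (presumably reserved PROT_NONE) and opens the usable range with mprotect. A self-contained sketch of that guard-page pattern; the names are illustrative, not the project's:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>

    #define GUARD_PAGE_SKETCH 4096UL

    static uint8_t *
    stack_alloc_sketch(size_t capacity)
    {
            /* Reserve guard page + stack with no access rights. */
            size_t   total  = GUARD_PAGE_SKETCH + capacity;
            uint8_t *buffer = mmap(NULL, total, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buffer == MAP_FAILED) { perror("mmap"); return NULL; }

            /* Make only the region past the guard page readable/writable. */
            uint8_t *low = buffer + GUARD_PAGE_SKETCH;
            if (mprotect(low, capacity, PROT_READ | PROT_WRITE) != 0) {
                    perror("mprotect");
                    munmap(buffer, total);
                    return NULL;
            }
            return low; /* touching buffer[0 .. GUARD_PAGE_SKETCH) still faults */
    }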

@@ -38,8 +38,8 @@ wasm_table_init(struct sledge_abi__wasm_table *wasm_table, size_t capacity)
 static INLINE struct sledge_abi__wasm_table *
 wasm_table_alloc(size_t capacity)
 {
-        struct sledge_abi__wasm_table *wasm_table = (struct sledge_abi__wasm_table *)malloc(
-          sizeof(struct sledge_abi__wasm_table));
+        struct sledge_abi__wasm_table *wasm_table = (struct sledge_abi__wasm_table *)
+          calloc(1, sizeof(struct sledge_abi__wasm_table));
         if (wasm_table == NULL) return NULL;

         int rc = wasm_table_init(wasm_table, capacity);

@@ -52,7 +52,7 @@ void
 global_request_scheduler_deque_initialize()
 {
         /* Allocate and Initialize the global deque */
-        global_request_scheduler_deque = (struct deque_sandbox *)malloc(sizeof(struct deque_sandbox));
+        global_request_scheduler_deque = (struct deque_sandbox *)calloc(1, sizeof(struct deque_sandbox));
         assert(global_request_scheduler_deque);

         /* Note: Below is a Macro */
         deque_init_sandbox(global_request_scheduler_deque, GLOBAL_REQUEST_SCHEDULER_DEQUE_CAPACITY);

@@ -31,7 +31,7 @@ wasi_context_init(wasi_options_t *options)
         /* TODO: Add default types */
         assert(options != NULL);
-        wasi_context_t *wasi_context = (wasi_context_t *)malloc(sizeof(wasi_context_t));
+        wasi_context_t *wasi_context = (wasi_context_t *)calloc(1, sizeof(wasi_context_t));

         if (options->argc > 0) {
                 assert(options->argv != NULL);

@@ -95,9 +95,12 @@ runtime_set_resource_limits_to_max()
 void
 runtime_initialize(void)
 {
         runtime_worker_threads = calloc(runtime_worker_threads_count, sizeof(pthread_t));
+        assert(runtime_worker_threads != NULL);
         runtime_worker_threads_argument = calloc(runtime_worker_threads_count, sizeof(int));
+        assert(runtime_worker_threads_argument != NULL);
         runtime_worker_threads_deadline = malloc(runtime_worker_threads_count * sizeof(uint64_t));
+        assert(runtime_worker_threads_deadline != NULL);
         memset(runtime_worker_threads_deadline, UINT8_MAX, runtime_worker_threads_count * sizeof(uint64_t));

         http_total_init();
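
Two things to note in this hunk: the allocations are now asserted, and the existing memset fills the deadline array bytewise with UINT8_MAX (0xFF), which makes every uint64_t element equal to UINT64_MAX, presumably a "no deadline yet" sentinel. A short worked example of that byte-fill:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    int
    main(void)
    {
            size_t    count     = 4; /* stand-in for runtime_worker_threads_count */
            uint64_t *deadlines = malloc(count * sizeof(uint64_t));
            assert(deadlines != NULL); /* the diff adds exactly this kind of check */

            /* Setting every byte to 0xFF makes each 64-bit element 0xFFFFFFFFFFFFFFFF. */
            memset(deadlines, UINT8_MAX, count * sizeof(uint64_t));
            assert(deadlines[0] == UINT64_MAX);

            free(deadlines);
            return 0;
    }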

@@ -112,8 +112,7 @@ sandbox_prepare_execution_environment(struct sandbox *sandbox)
 {
         assert(sandbox != NULL);

         char *error_message = "";
-        uint64_t now = __getcycles();
         int rc;

@@ -168,7 +168,7 @@ sledge_abi__wasi_snapshot_preview1_args_get(__wasi_size_t argv_retoffset, __wasi
         /* args_get backings return a vector of host pointers. We need a host buffer to store this
          * temporarily before unswizzling and writing to linear memory */
-        char **argv_temp = calloc(sizeof(char *), argc);
+        char **argv_temp = calloc(argc, sizeof(char *));
         if (unlikely(argv_temp == NULL)) { goto done; }

         /* Writes argv_buf to linear memory and argv vector to our temporary buffer */
@@ -284,7 +284,7 @@ sledge_abi__wasi_snapshot_preview1_environ_get(__wasi_size_t env_retoffset, __wa
          * these results to environ_temp temporarily before converting to offsets and writing to
          * linear memory. We could technically write this to linear memory and then do a "fix up,"
          * but this would leak host information and constitue a security issue */
-        char **env_temp = calloc(sizeof(char *), envc);
+        char **env_temp = calloc(envc, sizeof(char *));
         if (unlikely(env_temp == NULL)) { goto done; }

         __wasi_size_t *env_retptr = (__wasi_size_t *)get_memory_ptr_for_runtime(env_retoffset,
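
Both WASI hunks are the same fix as the priority-queue one: calloc takes (nmemb, size), so calloc(argc, sizeof(char *)) and calloc(envc, sizeof(char *)) read as "count elements of pointer size" and keep the element count in the slot the allocator's overflow check is defined around. A hypothetical helper showing the shape:

    #include <stdlib.h>

    /* Allocate a zeroed array of 'count' host pointers, as args_get/environ_get
     * need for their temporary buffers. */
    static char **
    alloc_ptr_array_sketch(size_t count)
    {
            /* count elements, each sizeof(char *) bytes; the swapped order
             * allocates the same total but reverses the documented roles. */
            return calloc(count, sizeof(char *));
    }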
