diff --git a/runtime/src/libc/uvio.c b/runtime/src/libc/uvio.c
index dc7fd63..45232ae 100644
--- a/runtime/src/libc/uvio.c
+++ b/runtime/src/libc/uvio.c
@@ -141,7 +141,7 @@ wasm_write(i32 file_descriptor, i32 buf_offset, i32 buf_size)
 	char * buffer = worker_thread_get_memory_ptr_void(buf_offset, buf_size);
 	struct sandbox *s = current_sandbox_get();
 	int l = s->module->max_response_size - s->request_response_data_length;
-	l = l > buf_size ? buf_size : l;
+	if (l > buf_size) l = buf_size;
 	if (l == 0) return 0;
 	memcpy(s->request_response_data + s->request_response_data_length, buffer, l);
 	s->request_response_data_length += l;
diff --git a/runtime/src/main.c b/runtime/src/main.c
index 3d48a34..d3b28d5 100644
--- a/runtime/src/main.c
+++ b/runtime/src/main.c
@@ -81,9 +81,9 @@ runtime_allocate_available_cores()
 		/* WORKER_THREAD_CORE_COUNT can be used as a cap on the number of cores to use, but if there are fewer
 		 * cores than WORKER_THREAD_CORE_COUNT, just use what is available */
 		u32 max_possible_workers = runtime_total_online_processors - 1;
-		runtime_total_worker_processors = (max_possible_workers >= WORKER_THREAD_CORE_COUNT)
-		                                    ? WORKER_THREAD_CORE_COUNT
-		                                    : max_possible_workers;
+		runtime_total_worker_processors = max_possible_workers;
+		if (max_possible_workers >= WORKER_THREAD_CORE_COUNT)
+			runtime_total_worker_processors = WORKER_THREAD_CORE_COUNT;
 	} else {
 		/* If single core, we'll do everything on CPUID 0 */
 		runtime_first_worker_processor = 0;
diff --git a/runtime/src/module.c b/runtime/src/module.c
index 45fd889..2552685 100644
--- a/runtime/src/module.c
+++ b/runtime/src/module.c
@@ -137,12 +137,16 @@ module_new(char *name, char *path, i32 argument_count, u32 stack_size, u32 max_m
 	module->relative_deadline_us = relative_deadline_us;
 	module->socket_descriptor    = -1;
 	module->port                 = port;
+
 	if (request_size == 0) request_size = MODULE_DEFAULT_REQUEST_RESPONSE_SIZE;
 	if (response_size == 0) response_size = MODULE_DEFAULT_REQUEST_RESPONSE_SIZE;
-	module->max_request_size             = request_size;
-	module->max_response_size            = response_size;
-	module->max_request_or_response_size = round_up_to_page(request_size > response_size ? request_size
-	                                                                                     : response_size);
+	module->max_request_size  = request_size;
+	module->max_response_size = response_size;
+	if (request_size > response_size) {
+		module->max_request_or_response_size = round_up_to_page(request_size);
+	} else {
+		module->max_request_or_response_size = round_up_to_page(response_size);
+	}
 
 	/* module_indirect_table is a thread-local struct */
 	struct indirect_table_entry *cache_tbl = module_indirect_table;
diff --git a/runtime/src/sandbox_request_scheduler_fifo.c b/runtime/src/sandbox_request_scheduler_fifo.c
index b7e01f0..c1f0867 100644
--- a/runtime/src/sandbox_request_scheduler_fifo.c
+++ b/runtime/src/sandbox_request_scheduler_fifo.c
@@ -23,7 +23,8 @@ We are unsure if the locking behavior is correct, so there may be deadlocks */
 #if NCORES == 1
 	pthread_mutex_unlock(&runtime_global_deque_mutex);
 #endif
-	return (return_code == 0) ? sandbox_request_raw : NULL;
+	if (return_code != 0) return NULL;
+	return sandbox_request_raw;
 }
 
 /**
diff --git a/runtime/src/sandbox_request_scheduler_ps.c b/runtime/src/sandbox_request_scheduler_ps.c
index adba354..5ab1913 100644
--- a/runtime/src/sandbox_request_scheduler_ps.c
+++ b/runtime/src/sandbox_request_scheduler_ps.c
@@ -18,7 +18,8 @@ sandbox_request_scheduler_ps_add(void *sandbox_request)
 		exit(EXIT_FAILURE);
 	}
 
-	return return_code == 0 ? sandbox_request : NULL;
+	if (return_code != 0) return NULL;
+	return sandbox_request;
 }
 
 /**
diff --git a/runtime/src/worker_thread.c b/runtime/src/worker_thread.c
index 2dca909..1b695a9 100644
--- a/runtime/src/worker_thread.c
+++ b/runtime/src/worker_thread.c
@@ -58,7 +58,8 @@ worker_thread_switch_to_sandbox(struct sandbox *next_sandbox)
 
 	/* Get the old sandbox we're switching from */
 	struct sandbox *previous_sandbox = current_sandbox_get();
-	arch_context_t *previous_register_context = previous_sandbox == NULL ? NULL : &previous_sandbox->ctxt;
+	arch_context_t *previous_register_context = NULL;
+	if (previous_sandbox != NULL) previous_register_context = &previous_sandbox->ctxt;
 
 	/* Set the current sandbox to the next */
 	current_sandbox_set(next_sandbox);
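
Every hunk in this patch replaces a ternary with an equivalent if statement; the wasm_write hunk, for example, clamps the write length to the remaining response capacity either way. Below is a minimal standalone sketch (not part of the patch; clamp_ternary and clamp_if are hypothetical helpers) that exhaustively checks the old and new forms agree over a small input range:

#include <assert.h>

/* Old form: l = l > buf_size ? buf_size : l; */
static int
clamp_ternary(int l, int buf_size)
{
	return l > buf_size ? buf_size : l;
}

/* New form: if (l > buf_size) l = buf_size; */
static int
clamp_if(int l, int buf_size)
{
	if (l > buf_size) l = buf_size;
	return l;
}

int
main(void)
{
	/* Compare both forms pairwise, including negative and zero lengths. */
	for (int l = -4; l <= 4; l++)
		for (int buf_size = -4; buf_size <= 4; buf_size++)
			assert(clamp_ternary(l, buf_size) == clamp_if(l, buf_size));
	return 0;
}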