diff --git a/.vscode/settings.json b/.vscode/settings.json
index fe28c43..d2057b5 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -57,7 +57,10 @@
         "time.h": "c",
         "local_runqueue_minheap.h": "c",
         "global_request_scheduler.h": "c",
-        "dlfcn.h": "c"
+        "dlfcn.h": "c",
+        "chrono": "c",
+        "common.h": "c",
+        "listener_thread.h": "c"
     },
     "files.exclude": {
         "**/.git": true,
diff --git a/runtime/Makefile b/runtime/Makefile
index e69fce6..f36e951 100644
--- a/runtime/Makefile
+++ b/runtime/Makefile
@@ -53,7 +53,6 @@ BINARY_NAME=sledgert
 # CFLAGS += -DLOG_HTTP_PARSER
 # CFLAGS += -DLOG_STATE_CHANGES
 # CFLAGS += -DLOG_LOCK_OVERHEAD
-# CFLAGS += -DLOG_LISTENER_LOCK_OVERHEAD
 # CFLAGS += -DLOG_CONTEXT_SWITCHES
 # CFLAGS += -DLOG_ADMISSIONS_CONTROL
 # CFLAGS += -DLOG_REQUEST_ALLOCATION
diff --git a/runtime/include/arch/getcycles.h b/runtime/include/arch/getcycles.h
new file mode 100644
index 0000000..1d960fe
--- /dev/null
+++ b/runtime/include/arch/getcycles.h
@@ -0,0 +1,3 @@
+#pragma once
+
+extern unsigned long long int __getcycles(void);
diff --git a/runtime/include/generic_thread.h b/runtime/include/generic_thread.h
new file mode 100644
index 0000000..a600e58
--- /dev/null
+++ b/runtime/include/generic_thread.h
@@ -0,0 +1,26 @@
+#pragma once
+
+#include
+
+#include "arch/getcycles.h"
+#include "debuglog.h"
+
+extern __thread uint64_t generic_thread_lock_duration;
+extern __thread uint64_t generic_thread_start_timestamp;
+
+/**
+ * Reports lock contention
+ */
+static inline void
+generic_thread_dump_lock_overhead()
+{
+#ifndef NDEBUG
+#ifdef LOG_LOCK_OVERHEAD
+	uint64_t duration = __getcycles() - generic_thread_start_timestamp;
+	debuglog("Locks consumed %lu / %lu cycles, or %f%%\n", generic_thread_lock_duration, duration,
+	         (double)generic_thread_lock_duration / duration * 100);
+#endif
+#endif
+}
+
+void generic_thread_initialize();
diff --git a/runtime/include/listener_thread.h b/runtime/include/listener_thread.h
new file mode 100644
index 0000000..26d5962
--- /dev/null
+++ b/runtime/include/listener_thread.h
@@ -0,0 +1,10 @@
+#pragma once
+
+#include "generic_thread.h"
+#include "module.h"
+
+#define LISTENER_THREAD_CORE_ID 0
+
+void listener_thread_initialize(void);
+__attribute__((noreturn)) void *listener_thread_main(void *dummy);
+int listener_thread_register_module(struct module *mod);
diff --git a/runtime/include/lock.h b/runtime/include/lock.h
index 1698bb0..d21677a 100644
--- a/runtime/include/lock.h
+++ b/runtime/include/lock.h
@@ -2,6 +2,7 @@
 
 #include
 
+#include "arch/getcycles.h"
 #include "runtime.h"
 #include "software_interrupt.h"
 
@@ -31,7 +32,7 @@ typedef ck_spinlock_mcs_t lock_t;
 	struct ck_spinlock_mcs _hygiene_##unique_variable_name##_node;                           \
 	uint64_t _hygiene_##unique_variable_name##_pre = __getcycles();                          \
 	ck_spinlock_mcs_lock((lock), &(_hygiene_##unique_variable_name##_node));                 \
-	worker_thread_lock_duration += (__getcycles() - _hygiene_##unique_variable_name##_pre);
+	generic_thread_lock_duration += (__getcycles() - _hygiene_##unique_variable_name##_pre);
 
 /**
  * Unlocks a lock
diff --git a/runtime/include/runtime.h b/runtime/include/runtime.h
index f1fbdbd..4253faa 100644
--- a/runtime/include/runtime.h
+++ b/runtime/include/runtime.h
@@ -8,29 +8,6 @@
 #include "likely.h"
 #include "types.h"
 
-/* Dedicated Listener Core */
-#define LISTENER_THREAD_CORE_ID 0
-#define LISTENER_THREAD_MAX_EPOLL_EVENTS 128
-
-#define RUNTIME_LOG_FILE "sledge.log"
-/* random! */
-#define RUNTIME_MAX_SANDBOX_REQUEST_COUNT (1 << 19)
-#define RUNTIME_READ_WRITE_VECTOR_LENGTH 16
-
-/* One Hour. Fits in a uint32_t or an int64_t */
-#define RUNTIME_RELATIVE_DEADLINE_US_MAX 3600000000
-
-/* One Hour. Fits in a uint32_t or an int64_t */
-#define RUNTIME_EXPECTED_EXECUTION_US_MAX 3600000000
-
-/* 100 MB */
-#define RUNTIME_HTTP_REQUEST_SIZE_MAX 100000000
-/* 100 MB */
-#define RUNTIME_HTTP_RESPONSE_SIZE_MAX 100000000
-
-/* Static buffer used for global deadline array */
-#define RUNTIME_MAX_WORKER_COUNT 32
-
 #ifndef NCORES
 #warning "NCORES not defined in Makefile. Defaulting to 2"
 #define NCORES 2
@@ -40,7 +17,16 @@
 #error "RUNTIME MINIMUM REQUIREMENT IS 2 CORES"
 #endif
 
-#define RUNTIME_WORKER_THREAD_CORE_COUNT (NCORES > 1 ? NCORES - 1 : NCORES)
+#define RUNTIME_EXPECTED_EXECUTION_US_MAX 3600000000
+#define RUNTIME_HTTP_REQUEST_SIZE_MAX 100000000 /* 100 MB */
+#define RUNTIME_HTTP_RESPONSE_SIZE_MAX 100000000 /* 100 MB */
+#define RUNTIME_LOG_FILE "sledge.log"
+#define RUNTIME_MAX_EPOLL_EVENTS 128
+#define RUNTIME_MAX_SANDBOX_REQUEST_COUNT (1 << 19)
+#define RUNTIME_MAX_WORKER_COUNT 32 /* Static buffer size for per-worker globals */
+#define RUNTIME_READ_WRITE_VECTOR_LENGTH 16
+#define RUNTIME_RELATIVE_DEADLINE_US_MAX 3600000000 /* One Hour. Fits in uint32_t */
+#define RUNTIME_WORKER_THREAD_CORE_COUNT (NCORES > 1 ? NCORES - 1 : NCORES)
 
 enum RUNTIME_SCHEDULER
 {
@@ -54,12 +40,6 @@ enum RUNTIME_SIGALRM_HANDLER
 	RUNTIME_SIGALRM_HANDLER_TRIAGED = 1
 };
 
-/*
- * Descriptor of the epoll instance used to monitor the socket descriptors of registered
- * serverless modules. The listener cores listens for incoming client requests through this.
- */
-extern int runtime_epoll_file_descriptor;
-
 extern bool runtime_preemption_enabled;
 
 /*
@@ -87,13 +67,10 @@ extern void alloc_linear_memory(void);
 extern void expand_memory(void);
 INLINE char *get_function_from_table(uint32_t idx, uint32_t type_id);
 INLINE char *get_memory_ptr_for_runtime(uint32_t offset, uint32_t bounds_check);
-extern void listener_thread_initialize(void);
 extern void runtime_initialize(void);
 extern void runtime_set_resource_limits_to_max();
 extern void stub_init(int32_t offset);
 
-unsigned long long __getcycles(void);
-
 /**
  * Used to determine if running in the context of a worker thread
  * @returns true if worker. false if listener core
@@ -110,7 +87,7 @@ runtime_is_worker()
 }
 
 static inline char *
-print_runtime_scheduler(enum RUNTIME_SCHEDULER variant)
+runtime_print_scheduler(enum RUNTIME_SCHEDULER variant)
 {
 	switch (variant) {
 	case RUNTIME_SCHEDULER_FIFO:
@@ -121,7 +98,7 @@ print_runtime_scheduler(enum RUNTIME_SCHEDULER variant)
 };
 
 static inline char *
-print_runtime_sigalrm_handler(enum RUNTIME_SIGALRM_HANDLER variant)
+runtime_print_sigalrm_handler(enum RUNTIME_SIGALRM_HANDLER variant)
 {
 	switch (variant) {
 	case RUNTIME_SIGALRM_HANDLER_BROADCAST:
diff --git a/runtime/include/worker_thread.h b/runtime/include/worker_thread.h
index 665bb4a..99beb47 100644
--- a/runtime/include/worker_thread.h
+++ b/runtime/include/worker_thread.h
@@ -1,11 +1,10 @@
 #pragma once
 
+#include "generic_thread.h"
 #include "runtime.h"
 
-extern __thread uint64_t worker_thread_lock_duration;
-extern __thread uint64_t worker_thread_start_timestamp;
-extern __thread int worker_thread_epoll_file_descriptor;
-extern __thread int worker_thread_idx;
+extern __thread int worker_thread_epoll_file_descriptor;
+extern __thread int worker_thread_idx;
 
 void *worker_thread_main(void *return_code);
 
diff --git a/runtime/src/env.c b/runtime/src/env.c
index b275b14..acda2f7 100644
--- a/runtime/src/env.c
+++ b/runtime/src/env.c
@@ -2,7 +2,7 @@
 #include
 #include
 
-#include "runtime.h"
+#include "arch/getcycles.h"
 #include "worker_thread.h"
 
 extern int32_t inner_syscall_handler(int32_t n, int32_t a, int32_t b, int32_t c, int32_t d, int32_t e, int32_t f);
diff --git a/runtime/src/generic_thread.c b/runtime/src/generic_thread.c
new file mode 100644
index 0000000..1fb9f9c
--- /dev/null
+++ b/runtime/src/generic_thread.c
@@ -0,0 +1,15 @@
+#include
+
+#include "arch/getcycles.h"
+
+/* Used by both the listener and worker threads */
+
+__thread uint64_t generic_thread_lock_duration = 0;
+__thread uint64_t generic_thread_start_timestamp = 0;
+
+void
+generic_thread_initialize()
+{
+	generic_thread_start_timestamp = __getcycles();
+	generic_thread_lock_duration = 0;
+}
diff --git a/runtime/src/listener_thread.c b/runtime/src/listener_thread.c
new file mode 100644
index 0000000..b9bc198
--- /dev/null
+++ b/runtime/src/listener_thread.c
@@ -0,0 +1,189 @@
+#include
+
+#include "arch/getcycles.h"
+#include "global_request_scheduler.h"
+#include "generic_thread.h"
+#include "listener_thread.h"
+#include "runtime.h"
+
+/*
+ * Descriptor of the epoll instance used to monitor the socket descriptors of registered
+ * serverless modules. The listener core listens for incoming client requests through this.
+ */
+int listener_thread_epoll_file_descriptor;
+
+/* Timestamp when listener thread began executing */
+static __thread uint64_t listener_thread_start_timestamp;
+
+/**
+ * Initializes the listener thread, pinned to core 0, and starts to listen for requests
+ */
+void
+listener_thread_initialize(void)
+{
+	printf("Starting listener thread\n");
+	cpu_set_t cs;
+
+	CPU_ZERO(&cs);
+	CPU_SET(LISTENER_THREAD_CORE_ID, &cs);
+
+	/* Setup epoll */
+	listener_thread_epoll_file_descriptor = epoll_create1(0);
+	assert(listener_thread_epoll_file_descriptor >= 0);
+
+	pthread_t listener_thread;
+	int ret = pthread_create(&listener_thread, NULL, listener_thread_main, NULL);
+	assert(ret == 0);
+	ret = pthread_setaffinity_np(listener_thread, sizeof(cpu_set_t), &cs);
+	assert(ret == 0);
+	ret = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cs);
+	assert(ret == 0);
+
+	printf("\tListener core thread: %lx\n", listener_thread);
+}
+
+/**
+ * @brief Registers a serverless module on the listener thread's epoll descriptor
+ **/
+int
+listener_thread_register_module(struct module *mod)
+{
+	assert(mod != NULL);
+	if (unlikely(listener_thread_epoll_file_descriptor == 0)) {
+		panic("Attempting to register a module before listener thread initialization");
+	}
+
+	int rc = 0;
+	struct epoll_event accept_evt;
+	accept_evt.data.ptr = (void *)mod;
+	accept_evt.events = EPOLLIN;
+	rc = epoll_ctl(listener_thread_epoll_file_descriptor, EPOLL_CTL_ADD, mod->socket_descriptor, &accept_evt);
+
+	return rc;
+}
+
+/**
+ * @brief Execution loop of the listener core; handles HTTP requests, allocates sandbox request objects, and
+ * pushes the sandbox requests to the global request scheduler
+ * @param dummy data pointer provided by pthreads API. Unused in this function
+ * @return NULL
+ *
+ * Used Globals:
+ * listener_thread_epoll_file_descriptor - the epoll file descriptor
+ *
+ */
+__attribute__((noreturn)) void *
+listener_thread_main(void *dummy)
+{
+	struct epoll_event epoll_events[RUNTIME_MAX_EPOLL_EVENTS];
+
+	generic_thread_initialize();
+
+	while (true) {
+		/*
+		 * Block indefinitely on the epoll file descriptor, waiting on up to a max number of events
+		 * TODO: Is RUNTIME_MAX_EPOLL_EVENTS actually limited to the max number of modules?
+		 */
+		int descriptor_count = epoll_wait(listener_thread_epoll_file_descriptor, epoll_events,
+		                                  RUNTIME_MAX_EPOLL_EVENTS, -1);
+		if (descriptor_count < 0) {
+			if (errno == EINTR) continue;
+
+			panic("epoll_wait: %s", strerror(errno));
+		}
+		/* Assumption: Because epoll_wait is set to not timeout, we should always have descriptors here
+		 */
+		assert(descriptor_count > 0);
+
+		uint64_t request_arrival_timestamp = __getcycles();
+		for (int i = 0; i < descriptor_count; i++) {
+			/* Check Event to determine if epoll returned an error */
+			if ((epoll_events[i].events & EPOLLERR) == EPOLLERR) {
+				int error = 0;
+				socklen_t errlen = sizeof(error);
+				if (getsockopt(epoll_events[i].data.fd, SOL_SOCKET, SO_ERROR, (void *)&error, &errlen)
+				    == 0) {
+					panic("epoll_wait: %s\n", strerror(error));
+				}
+				panic("epoll_wait");
+			};
+
+			/* Assumption: We have only registered EPOLLIN events, so we should see no others here
+			 */
+			assert((epoll_events[i].events & EPOLLIN) == EPOLLIN);
+
+			/* Unpack module from epoll event */
+			struct module *module = (struct module *)epoll_events[i].data.ptr;
+			assert(module);
+
+			/*
+			 * We are likely not responsible for cleaning up epoll events, but clear the pointer to trigger
+			 * the assertion just in case.
+			 */
+			epoll_events[i].data.ptr = NULL;
+
+			/* Accept Client Request as a nonblocking socket, saving address information */
+			struct sockaddr_in client_address;
+			socklen_t address_length = sizeof(client_address);
+
+			/*
+			 * Accept as many requests as possible, terminating when we would have blocked.
+			 * This inner loop is used in case there are more datagrams than epoll events for some
+			 * reason
+			 */
+			while (true) {
+				int client_socket = accept4(module->socket_descriptor,
+				                            (struct sockaddr *)&client_address, &address_length,
+				                            SOCK_NONBLOCK);
+				if (unlikely(client_socket < 0)) {
+					if (errno == EWOULDBLOCK || errno == EAGAIN) break;
+
+					panic("accept4: %s", strerror(errno));
+				}
+
+				/* We should never have accepted on fd 0, 1, or 2 */
+				assert(client_socket != STDIN_FILENO);
+				assert(client_socket != STDOUT_FILENO);
+				assert(client_socket != STDERR_FILENO);
+
+				/*
+				 * According to accept(2), it is possible that the sockaddr structure
+				 * client_address may be too small, resulting in data being truncated to fit.
+				 * The accept call mutates the size value to indicate that this is the case.
+				 */
+				if (address_length > sizeof(client_address)) {
+					debuglog("Client address %s truncated because buffer was too small\n",
+					         module->name);
+				}
+
+				http_total_increment_request();
+
+				/*
+				 * Perform admissions control.
+				 * If 0, workload was rejected, so close with 503 and continue
+				 */
+				uint64_t work_admitted = admissions_control_decide(module->admissions_info.estimate);
+				if (work_admitted == 0) {
+					client_socket_send(client_socket, 503);
+					if (unlikely(close(client_socket) < 0))
+						debuglog("Error closing client socket - %s", strerror(errno));
+
+					continue;
+				}
+
+				/* Allocate a Sandbox Request */
+				struct sandbox_request *sandbox_request =
+				  sandbox_request_allocate(module, module->name, client_socket,
+				                           (const struct sockaddr *)&client_address,
+				                           request_arrival_timestamp, work_admitted);
+
+				/* Add to the Global Sandbox Request Scheduler */
+				global_request_scheduler_add(sandbox_request);
+
+			} /* while true */
+		} /* for loop */
+		generic_thread_dump_lock_overhead();
+	} /* while true */
+
+	panic("Listener thread unexpectedly broke loop\n");
+}
diff --git a/runtime/src/main.c b/runtime/src/main.c
index adf76b3..2534416 100644
--- a/runtime/src/main.c
+++ b/runtime/src/main.c
@@ -15,6 +15,7 @@
 #endif
 
 #include "debuglog.h"
+#include "listener_thread.h"
 #include "module.h"
 #include "panic.h"
 #include "runtime.h"
@@ -175,7 +176,7 @@ runtime_configure()
 	} else {
 		panic("Invalid scheduler policy: %s. Must be {EDF|FIFO}\n", scheduler_policy);
 	}
-	printf("\tScheduler Policy: %s\n", print_runtime_scheduler(runtime_scheduler));
+	printf("\tScheduler Policy: %s\n", runtime_print_scheduler(runtime_scheduler));
 
 	/* Sigalrm Handler Technique */
 	char *sigalrm_policy = getenv("SLEDGE_SIGALRM_HANDLER");
@@ -189,7 +190,7 @@ runtime_configure()
 	} else {
 		panic("Invalid sigalrm policy: %s. Must be {BROADCAST|TRIAGED}\n", sigalrm_policy);
 	}
-	printf("\tSigalrm Policy: %s\n", print_runtime_sigalrm_handler(runtime_sigalrm_handler));
+	printf("\tSigalrm Policy: %s\n", runtime_print_sigalrm_handler(runtime_sigalrm_handler));
 
 	/* Runtime Preemption Toggle */
 	char *preempt_disable = getenv("SLEDGE_DISABLE_PREEMPTION");
@@ -271,12 +272,6 @@ log_compiletime_config()
 	printf("\tLog Lock Overhead: Disabled\n");
 #endif
 
-#ifdef LOG_LISTENER_LOCK_OVERHEAD
-	printf("\tLog Listener Lock Overhead: Enabled\n");
-#else
-	printf("\tLog Listener Lock Overhead: Disabled\n");
-#endif
-
 #ifdef LOG_CONTEXT_SWITCHES
 	printf("\tLog Context Switches: Enabled\n");
 #else
@@ -353,13 +348,17 @@ main(int argc, char **argv)
 	runtime_allocate_available_cores();
 	runtime_configure();
 	runtime_initialize();
+
+	listener_thread_initialize();
+	runtime_start_runtime_worker_threads();
+	software_interrupt_initialize();
+	software_interrupt_arm_timer();
+
 #ifdef LOG_MODULE_LOADING
 	debuglog("Parsing modules file [%s]\n", argv[1]);
 #endif
 
 	if (module_new_from_json(argv[1])) panic("failed to initialize module(s) defined in %s\n", argv[1]);
-	runtime_start_runtime_worker_threads();
-	listener_thread_initialize();
 
 	for (int i = 0; i < runtime_worker_threads_count; i++) {
 		int ret = pthread_join(runtime_worker_threads[i], NULL);
diff --git a/runtime/src/module.c b/runtime/src/module.c
index 423c0e8..f5917c7 100644
--- a/runtime/src/module.c
+++ b/runtime/src/module.c
@@ -10,6 +10,7 @@
 #include "debuglog.h"
 #include "http.h"
 #include "likely.h"
+#include "listener_thread.h"
 #include "module.h"
 #include "module_database.h"
 #include "panic.h"
@@ -64,10 +65,7 @@ module_listen(struct module *module)
 
 	/* Set the socket descriptor and register with our global epoll instance to monitor for incoming HTTP
	   requests */
-	struct epoll_event accept_evt;
-	accept_evt.data.ptr = (void *)module;
-	accept_evt.events = EPOLLIN;
-	rc = epoll_ctl(runtime_epoll_file_descriptor, EPOLL_CTL_ADD, module->socket_descriptor, &accept_evt);
+	rc = listener_thread_register_module(module);
 	if (unlikely(rc < 0)) goto err_add_to_epoll;
 
 	rc = 0;
diff --git a/runtime/src/runtime.c b/runtime/src/runtime.c
index ff26073..a3c2e6f 100644
--- a/runtime/src/runtime.c
+++ b/runtime/src/runtime.c
@@ -12,6 +12,7 @@
 #include "global_request_scheduler_minheap.h"
 #include "http_parser_settings.h"
 #include "http_response.h"
+#include "listener_thread.h"
 #include "module.h"
 #include "runtime.h"
 #include "sandbox_request.h"
@@ -21,7 +22,6 @@
  * Shared Process State *
  **************************/
 
-int runtime_epoll_file_descriptor;
 pthread_t runtime_worker_threads[RUNTIME_WORKER_THREAD_CORE_COUNT];
 int runtime_worker_threads_argument[RUNTIME_WORKER_THREAD_CORE_COUNT] = { 0 };
 /* The active deadline of the sandbox running on each worker thread */
@@ -88,10 +88,6 @@ runtime_initialize(void)
 	sandbox_request_count_initialize();
 	sandbox_count_initialize();
 
-	/* Setup epoll */
-	runtime_epoll_file_descriptor = epoll_create1(0);
-	assert(runtime_epoll_file_descriptor >= 0);
-
 	/* Setup Scheduler */
 	switch (runtime_scheduler) {
 	case RUNTIME_SCHEDULER_EDF:
@@ -114,179 +110,3 @@ runtime_initialize(void)
 	http_parser_settings_initialize();
 	admissions_control_initialize();
 }
-
-/*************************
- * Listener Thread Logic *
- ************************/
-
-static inline void
-listener_thread_start_lock_overhead_measurement(uint64_t request_arrival_timestamp)
-{
-#ifdef LOG_LISTENER_LOCK_OVERHEAD
-	worker_thread_start_timestamp = request_arrival_timestamp;
-	worker_thread_lock_duration = 0;
-#endif
-}
-
-static inline void
-listener_thread_stop_lock_overhead_measurement()
-{
-#ifdef LOG_LISTENER_LOCK_OVERHEAD
-	uint64_t worker_duration = __getcycles() - worker_thread_start_timestamp;
-	debuglog("Locks consumed %lu / %lu cycles, or %f%%\n", worker_thread_lock_duration, worker_duration,
-	         (double)worker_thread_lock_duration / worker_duration * 100);
-#endif
-}
-
-/**
- * @brief Execution Loop of the listener core, io_handles HTTP requests, allocates sandbox request objects, and
- * pushes the sandbox object to the global dequeue
- * @param dummy data pointer provided by pthreads API. Unused in this function
- * @return NULL
- *
- * Used Globals:
- * runtime_epoll_file_descriptor - the epoll file descriptor
- *
- */
-__attribute__((noreturn)) void *
-listener_thread_main(void *dummy)
-{
-	struct epoll_event epoll_events[LISTENER_THREAD_MAX_EPOLL_EVENTS];
-
-	while (true) {
-		/*
-		 * Block indefinitely on the epoll file descriptor, waiting on up to a max number of events
-		 * TODO: Is LISTENER_THREAD_MAX_EPOLL_EVENTS actually limited to the max number of modules?
-		 */
-		int descriptor_count = epoll_wait(runtime_epoll_file_descriptor, epoll_events,
-		                                  LISTENER_THREAD_MAX_EPOLL_EVENTS, -1);
-		if (descriptor_count < 0) {
-			if (errno == EINTR) continue;
-
-			panic("epoll_wait: %s", strerror(errno));
-		}
-		/* Assumption: Because epoll_wait is set to not timeout, we should always have descriptors here
-		 */
-		assert(descriptor_count > 0);
-
-		uint64_t request_arrival_timestamp = __getcycles();
-		listener_thread_start_lock_overhead_measurement(request_arrival_timestamp);
-		for (int i = 0; i < descriptor_count; i++) {
-			/* Check Event to determine if epoll returned an error */
-			if ((epoll_events[i].events & EPOLLERR) == EPOLLERR) {
-				int error = 0;
-				socklen_t errlen = sizeof(error);
-				if (getsockopt(epoll_events[i].data.fd, SOL_SOCKET, SO_ERROR, (void *)&error, &errlen)
-				    == 0) {
-					panic("epoll_wait: %s\n", strerror(error));
-				}
-				panic("epoll_wait");
-			};
-
-			/* Assumption: We have only registered EPOLLIN events, so we should see no others here
-			 */
-			assert((epoll_events[i].events & EPOLLIN) == EPOLLIN);
-
-			/* Unpack module from epoll event */
-			struct module *module = (struct module *)epoll_events[i].data.ptr;
-			assert(module);
-
-			/*
-			 * I don't think we're responsible to cleanup epoll events, but clearing to trigger
-			 * the assertion just in case.
-			 */
-			epoll_events[i].data.ptr = NULL;
-
-			/* Accept Client Request as a nonblocking socket, saving address information */
-			struct sockaddr_in client_address;
-			socklen_t address_length = sizeof(client_address);
-
-			/*
-			 * Accept as many requests as possible, terminating when we would have blocked
-			 * This inner loop is used in case there are more datagrams than epoll events for some
-			 * reason
-			 */
-			while (true) {
-				int client_socket = accept4(module->socket_descriptor,
-				                            (struct sockaddr *)&client_address, &address_length,
-				                            SOCK_NONBLOCK);
-				if (unlikely(client_socket < 0)) {
-					if (errno == EWOULDBLOCK || errno == EAGAIN) break;
-
-					panic("accept4: %s", strerror(errno));
-				}
-
-				/* We should never have accepted on fd 0, 1, or 2 */
-				assert(client_socket != STDIN_FILENO);
-				assert(client_socket != STDOUT_FILENO);
-				assert(client_socket != STDERR_FILENO);
-
-				/*
-				 * According to accept(2), it is possible that the the sockaddr structure
-				 * client_address may be too small, resulting in data being truncated to fit.
-				 * The accept call mutates the size value to indicate that this is the case.
-				 */
-				if (address_length > sizeof(client_address)) {
-					debuglog("Client address %s truncated because buffer was too small\n",
-					         module->name);
-				}
-
-				http_total_increment_request();
-
-				/*
-				 * Perform admissions control.
-				 * If 0, workload was rejected, so close with 503 and continue
-				 */
-				uint64_t work_admitted = admissions_control_decide(module->admissions_info.estimate);
-				if (work_admitted == 0) {
-					client_socket_send(client_socket, 503);
-					if (unlikely(close(client_socket) < 0))
-						debuglog("Error closing client socket - %s", strerror(errno));
-
-					continue;
-				}
-
-				/* Allocate a Sandbox Request */
-				struct sandbox_request *sandbox_request =
-				  sandbox_request_allocate(module, module->name, client_socket,
-				                           (const struct sockaddr *)&client_address,
-				                           request_arrival_timestamp, work_admitted);
-
-				/* Add to the Global Sandbox Request Scheduler */
-				global_request_scheduler_add(sandbox_request);
-
-			} /* while true */
-		} /* for loop */
-		listener_thread_stop_lock_overhead_measurement();
-	} /* while true */
-
-	panic("Listener thread unexpectedly broke loop\n");
-
-
-	/* Cleanup Tasks... These won't run, but placed here to keep track */
-	fclose(runtime_sandbox_perf_log);
-}
-
-/**
- * Initializes the listener thread, pinned to core 0, and starts to listen for requests
- */
-void
-listener_thread_initialize(void)
-{
-	printf("Starting listener thread\n");
-	cpu_set_t cs;
-
-	CPU_ZERO(&cs);
-	CPU_SET(LISTENER_THREAD_CORE_ID, &cs);
-
-	pthread_t listener_thread;
-	int ret = pthread_create(&listener_thread, NULL, listener_thread_main, NULL);
-	assert(ret == 0);
-	ret = pthread_setaffinity_np(listener_thread, sizeof(cpu_set_t), &cs);
-	assert(ret == 0);
-	ret = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cs);
-	assert(ret == 0);
-
-	software_interrupt_initialize();
-	software_interrupt_arm_timer();
-}
diff --git a/runtime/src/worker_thread.c b/runtime/src/worker_thread.c
index 7eb893b..43e7b67 100644
--- a/runtime/src/worker_thread.c
+++ b/runtime/src/worker_thread.c
@@ -27,12 +27,6 @@ __thread struct arch_context worker_thread_base_context;
 
 __thread int worker_thread_epoll_file_descriptor;
 
-/* Total Lock Contention in Cycles */
-__thread uint64_t worker_thread_lock_duration;
-
-/* Timestamp when worker thread began executing */
-__thread uint64_t worker_thread_start_timestamp;
-
 /* Used to index into global arguments and deadlines arrays */
 __thread int worker_thread_idx;
 
@@ -40,21 +34,6 @@ __thread int worker_thread_idx;
  * Worker Thread Logic *
  **********************/
 
-/**
- * Reports lock contention for the worker thread
- */
-static inline void
-worker_thread_dump_lock_overhead()
-{
-#ifndef NDEBUG
-#ifdef LOG_LOCK_OVERHEAD
-	uint64_t worker_duration = __getcycles() - worker_thread_start_timestamp;
-	debuglog("Locks consumed %lu / %lu cycles, or %f%%\n", worker_thread_lock_duration, worker_duration,
-	         (double)worker_thread_lock_duration / worker_duration * 100);
-#endif
-#endif
-}
-
 /**
  * Conditionally triggers appropriate state changes for exiting sandboxes
  * @param exiting_sandbox - The sandbox that ran to completion
@@ -231,9 +210,9 @@ static inline void
 worker_thread_execute_epoll_loop(void)
 {
 	while (true) {
-		struct epoll_event epoll_events[LISTENER_THREAD_MAX_EPOLL_EVENTS];
+		struct epoll_event epoll_events[RUNTIME_MAX_EPOLL_EVENTS];
 		int descriptor_count = epoll_wait(worker_thread_epoll_file_descriptor, epoll_events,
-		                                  LISTENER_THREAD_MAX_EPOLL_EVENTS, 0);
+		                                  RUNTIME_MAX_EPOLL_EVENTS, 0);
 		if (descriptor_count < 0) {
 			if (errno == EINTR) continue;
 
@@ -298,10 +277,6 @@ worker_thread_main(void *argument)
 	/* Index was passed via argument */
 	worker_thread_idx = *(int *)argument;
 
-	/* Initialize Bookkeeping */
-	worker_thread_start_timestamp = __getcycles();
-	worker_thread_lock_duration = 0;
-
 	/* Initialize Base Context as unused
 	 * The SP and IP are populated during the first FAST switch away
 	 */
@@ -370,7 +345,7 @@ worker_thread_on_sandbox_exit(struct sandbox *exiting_sandbox)
 {
 	assert(exiting_sandbox);
 	assert(!software_interrupt_is_enabled());
-	worker_thread_dump_lock_overhead();
+	generic_thread_dump_lock_overhead();
 	worker_thread_switch_to_base_context();
 	panic("Unexpected return\n");
 }