1. Optimization: avoid the global queue if the next sandbox has the same deadline as the current sandbox. 2. Correct formatting and enrich comments.

main
xiaosuGW 3 years ago
parent b78cd5fdfb
commit 417427ab39

@ -58,6 +58,7 @@ BINARY_NAME=sledgert
# CFLAGS += -DLOG_REQUEST_ALLOCATION # CFLAGS += -DLOG_REQUEST_ALLOCATION
# CFLAGS += -DLOG_PREEMPTION # CFLAGS += -DLOG_PREEMPTION
# CFLAGS += -DLOG_MODULE_LOADING # CFLAGS += -DLOG_MODULE_LOADING
# CFLAGS += -DOPT_AVOID_GLOBAL_QUEUE
# This dumps per module *.csv files containing the cycle a sandbox has been in RUNNING when each # This dumps per module *.csv files containing the cycle a sandbox has been in RUNNING when each
# page is allocated. This helps understand the relationship to memory allocation and execution time. # page is allocated. This helps understand the relationship to memory allocation and execution time.

@ -25,7 +25,7 @@ struct sandbox_request {
uint64_t absolute_deadline; /* cycles */ uint64_t absolute_deadline; /* cycles */
char * previous_function_output; char * previous_function_output;
ssize_t output_length; ssize_t output_length;
ssize_t previous_request_length; /* previous request length */ ssize_t previous_request_length;
/* /*
* Unitless estimate of the instantaneous fraction of system capacity required to run the request * Unitless estimate of the instantaneous fraction of system capacity required to run the request
* Calculated by estimated execution time (cycles) * runtime_admissions_granularity / relative deadline (cycles) * Calculated by estimated execution time (cycles) * runtime_admissions_granularity / relative deadline (cycles)

@ -24,6 +24,7 @@ worker_thread_execute_epoll_loop(void)
struct epoll_event epoll_events[RUNTIME_MAX_EPOLL_EVENTS]; struct epoll_event epoll_events[RUNTIME_MAX_EPOLL_EVENTS];
int descriptor_count = epoll_wait(worker_thread_epoll_file_descriptor, epoll_events, int descriptor_count = epoll_wait(worker_thread_epoll_file_descriptor, epoll_events,
RUNTIME_MAX_EPOLL_EVENTS, 0); RUNTIME_MAX_EPOLL_EVENTS, 0);
if (descriptor_count < 0) { if (descriptor_count < 0) {
if (errno == EINTR) continue; if (errno == EINTR) continue;

@ -70,6 +70,10 @@ current_sandbox_start(void)
sandbox_initialize_stdio(sandbox); sandbox_initialize_stdio(sandbox);
struct module * next_module = sandbox->module->next_module; struct module * next_module = sandbox->module->next_module;
/*
* Add the client fd to epoll if it is the first or last sandbox in the chain because they
* need to read and write from/to this fd
*/
if (sandbox->request_from_outside || next_module == NULL) { if (sandbox->request_from_outside || next_module == NULL) {
sandbox_open_http(sandbox); sandbox_open_http(sandbox);
} }
@ -78,10 +82,12 @@ current_sandbox_start(void)
if (sandbox_receive_request(sandbox) < 0) { if (sandbox_receive_request(sandbox) < 0) {
error_message = "Unable to receive or parse client request\n"; error_message = "Unable to receive or parse client request\n";
goto err; goto err;
}; }
} else { } else {
/* copy previous output to sandbox->request_response_data, as the input for the current sandbox.*/ /*
/* let sandbox->http_request->body points to sandbox->request_response_data*/ * Copy previous output to sandbox->request_response_data, as the input for the current sandbox.
* Let sandbox->http_request->body points to sandbox->request_response_data
*/
assert(sandbox->previous_function_output != NULL); assert(sandbox->previous_function_output != NULL);
memcpy(sandbox->request_response_data, sandbox->previous_function_output, sandbox->output_length); memcpy(sandbox->request_response_data, sandbox->previous_function_output, sandbox->output_length);
sandbox->http_request.body = sandbox->request_response_data; sandbox->http_request.body = sandbox->request_response_data;
@ -107,7 +113,12 @@ current_sandbox_start(void)
if (next_module != NULL) { if (next_module != NULL) {
/* Generate a new request, copy the current sandbox's output to the next request's buffer, and put it to the global queue */ /* Generate a new request, copy the current sandbox's output to the next request's buffer, and put it to the global queue */
ssize_t output_length = sandbox->request_response_data_length - sandbox->request_length; ssize_t output_length = sandbox->request_response_data_length - sandbox->request_length;
char * pre_func_output = (char *) malloc(output_length); char * pre_func_output = (char *)malloc(output_length);
if (!pre_func_output) {
fprintf(stderr, "Failed to allocate memory for the previous output: %s\n", strerror(errno));
goto err;
};
memcpy(pre_func_output, sandbox->request_response_data + sandbox->request_length, output_length); memcpy(pre_func_output, sandbox->request_response_data + sandbox->request_length, output_length);
uint64_t enqueue_timestamp = __getcycles(); uint64_t enqueue_timestamp = __getcycles();
struct sandbox_request *sandbox_request = struct sandbox_request *sandbox_request =
@ -116,11 +127,31 @@ current_sandbox_start(void)
(const struct sockaddr *)&sandbox->client_address, (const struct sockaddr *)&sandbox->client_address,
sandbox->request_arrival_timestamp, enqueue_timestamp, sandbox->request_arrival_timestamp, enqueue_timestamp,
true, pre_func_output, output_length); true, pre_func_output, output_length);
/* TODO: all sandboxs in the chain share the same request id, but sandbox_request_allocate() will busy-wait to generate an unique /* TODO: All sandboxs in the chain share the same request id, but sandbox_request_allocate()
id, should we optimize it here?*/ * will busy-wait to generate an unique id, should we optimize it here?
*/
sandbox_request->id = sandbox->id; sandbox_request->id = sandbox->id;
#ifdef OPT_AVOID_GLOBAL_QUEUE
/* TODO: The running time of the current sandbox contains the next sandbox's initialization time, does it matter? */
if (sandbox->absolute_deadline == sandbox_request->absolute_deadline) {
/* Put the next sandbox to the local run queue to reduce the overhead of the global queue */
struct sandbox *next_sandbox = sandbox_allocate(sandbox_request);
if (!next_sandbox) {
free(sandbox_request);
goto err;
}
assert(next_sandbox->state == SANDBOX_INITIALIZED);
sandbox_set_as_runnable(next_sandbox, SANDBOX_INITIALIZED);
} else {
/* Add to the Global Sandbox Request Scheduler */ /* Add to the Global Sandbox Request Scheduler */
global_request_scheduler_add(sandbox_request); global_request_scheduler_add(sandbox_request);
}
#else
/* Add to the Global Sandbox Request Scheduler */
global_request_scheduler_add(sandbox_request);
#endif
/* Remove the client fd from epoll if it is the first sandbox in the chain */
if (sandbox->request_from_outside) { if (sandbox->request_from_outside) {
sandbox_remove_from_epoll(sandbox); sandbox_remove_from_epoll(sandbox);
} }

@ -75,7 +75,8 @@ sandbox_request_get_priority_fn(void *element)
* Initializes the variant and registers against the polymorphic interface * Initializes the variant and registers against the polymorphic interface
*/ */
void void
global_request_scheduler_minheap_initialize() { global_request_scheduler_minheap_initialize()
{
global_request_scheduler_minheap = priority_queue_initialize(4096, true, sandbox_request_get_priority_fn); global_request_scheduler_minheap = priority_queue_initialize(4096, true, sandbox_request_get_priority_fn);
struct global_request_scheduler_config config = { struct global_request_scheduler_config config = {

@ -559,14 +559,9 @@ module_new_from_json(char *file_name)
assert(module); assert(module);
if (tail_module == NULL) { if (tail_module != NULL) { tail_module->next_module = module; }
tail_module = module; tail_module = module;
tail_module->next_module = NULL; tail_module->next_module = NULL;
} else {
tail_module->next_module = module;
tail_module = module;
tail_module->next_module = NULL;
}
module_set_http_info(module, request_count, request_headers, request_content_type, module_set_http_info(module, request_count, request_headers, request_content_type,
response_count, reponse_headers, response_content_type); response_count, reponse_headers, response_content_type);

Loading…
Cancel
Save