diff --git a/.gitignore b/.gitignore
index c000bd5..4fbb560 100644
--- a/.gitignore
+++ b/.gitignore
@@ -58,6 +58,9 @@ runtime/tests/**/*.txt
 runtime/tests/**/*.xlsx
 runtime/tests/test_data
 runtime/tests/*.log
+data/*.txt
+data/*.log
+data/*.json
 
 # Swap Files
 *.swp
diff --git a/data/get_average.py b/data/get_average.py
new file mode 100644
index 0000000..799691e
--- /dev/null
+++ b/data/get_average.py
@@ -0,0 +1,30 @@
+import sys
+
+def calculate_average(input_file, column_index):
+    total = 0
+    count = 0
+
+    with open(input_file, 'r') as f:
+        for line in f:
+            columns = line.strip().split(',')
+            if len(columns) > column_index:
+                try:
+                    value = float(columns[column_index])
+                    total += value
+                    count += 1
+                except ValueError:
+                    print(f"error value: {columns[column_index]}")
+
+    if count > 0:
+        average = total / count
+        print(f"list {column_index + 1} average: {average}")
+    else:
+        print("no value")
+
+if __name__ == "__main__":
+    if len(sys.argv) != 3:
+        print(" python calculate_average.py input_file column_index")
+    else:
+        input_file = sys.argv[1]
+        column_index = int(sys.argv[2]) - 1
+        calculate_average(input_file, column_index)
diff --git a/data/split_logs.py b/data/split_logs.py
new file mode 100644
index 0000000..d01b491
--- /dev/null
+++ b/data/split_logs.py
@@ -0,0 +1,24 @@
+# split_logs.py
+
+def split_logs(input_file):
+    modules = {
+        "resize1": [],
+        "png2bmp1": [],
+        "lpd_wasm1": [],
+        "cifar10_1": [],
+        "work1": []
+    }
+
+    with open(input_file, 'r') as f:
+        for line in f:
+            for module in modules.keys():
+                if module in line:
+                    modules[module].append(line.strip())
+                    break
+
+    for module, entries in modules.items():
+        with open(f"{module}.txt", 'w') as outfile:
+            outfile.write("\n".join(entries) + "\n")
+
+if __name__ == "__main__":
+    split_logs("sledge.log")
diff --git a/data/txt_split.py b/data/txt_split.py
new file mode 100644
index 0000000..cbfaf2f
--- /dev/null
+++ b/data/txt_split.py
@@ -0,0 +1,20 @@
+import sys
+
+def split_columns(input_file):
+    columns = []
+
+    with open(input_file, 'r') as f:
+        for line in f:
+            parts = line.strip().split(',')
+            for i, part in enumerate(parts):
+                if len(columns) <= i:
+                    columns.append([])
+                columns[i].append(part)
+
+    for i, column in enumerate(columns):
+        with open(f"{input_file[:-4]}_column_{i + 1}.txt", 'w') as outfile:
+            outfile.write("\n".join(column) + "\n")
+
+if __name__ == "__main__":
+    for input_file in sys.argv[1:]:
+        split_columns(input_file)
diff --git a/hey.txt b/hey.txt
deleted file mode 100644
index 6e90dd3..0000000
--- a/hey.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-
-Summary:
-  Total:        30.0061 secs
-  Slowest:      0.1721 secs
-  Fastest:      0.0035 secs
-  Average:      0.0088 secs
-  Requests/sec: 227.3541
-
-  Total data:   327456 bytes
-  Size/request: 48 bytes
-
-Response time histogram:
-  0.004 [1]     |
-  0.020 [6695]  |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
-  0.037 [114]   |■
-  0.054 [4]     |
-  0.071 [0]     |
-  0.088 [5]     |
-  0.105 [1]     |
-  0.122 [1]     |
-  0.138 [0]     |
-  0.155 [0]     |
-  0.172 [1]     |
-
-
-Latency distribution:
-  10% in 0.0059 secs
-  25% in 0.0068 secs
-  50% in 0.0079 secs
-  75% in 0.0095 secs
-  90% in 0.0119 secs
-  95% in 0.0143 secs
-  99% in 0.0237 secs
-
-Details (average, fastest, slowest):
-  DNS+dialup:   0.0004 secs, 0.0035 secs, 0.1721 secs
-  DNS-lookup:   0.0000 secs, 0.0000 secs, 0.0000 secs
-  req write:    0.0001 secs, 0.0000 secs, 0.0067 secs
-  resp wait:    0.0081 secs, 0.0031 secs, 0.1715 secs
-  resp read:    0.0002 secs, 0.0000 secs, 0.0049 secs
-
-Status code distribution:
-  [200] 6822 responses
-
-
-
diff --git a/runtime/include/runtime.h b/runtime/include/runtime.h
index 1f14e4a..4ec5b21 100644
--- a/runtime/include/runtime.h
+++ b/runtime/include/runtime.h
@@ -44,7 +44,7 @@ extern uint32_t runtime_worker_threads_count;
 extern int runtime_worker_threads_argument[RUNTIME_WORKER_THREAD_CORE_COUNT];
 extern uint64_t runtime_worker_threads_deadline[RUNTIME_WORKER_THREAD_CORE_COUNT];
 extern uint64_t runtime_worker_threads_remaining_slack[RUNTIME_WORKER_THREAD_CORE_COUNT];
-
+extern uint64_t runtime_worker_threads_laxity[RUNTIME_WORKER_THREAD_CORE_COUNT];
 extern void runtime_initialize(void);
 extern void runtime_set_pthread_prio(pthread_t thread, unsigned int nice);
 extern void runtime_set_resource_limits_to_max(void);
diff --git a/runtime/include/sandbox_functions.h b/runtime/include/sandbox_functions.h
index c564166..4f05d21 100644
--- a/runtime/include/sandbox_functions.h
+++ b/runtime/include/sandbox_functions.h
@@ -124,6 +124,15 @@ sandbox_get_mdl_priority(void *element)
     return remaining_slack_mdl;
 };
 
+static inline uint64_t
+sandbox_get_llf_priority(void *element)
+{
+    struct sandbox *sandbox = (struct sandbox *)element;
+    uint64_t now = __getcycles();
+    uint64_t Laxity_llf = sandbox->laxity - (now - sandbox->last_update_timestamp);
+    return Laxity_llf;
+};
+
 /**
  * Maps a sandbox fd to an underlying host fd
  * Returns error condition if the file_descriptor to set does not contain sandbox preopen magic
diff --git a/runtime/include/sandbox_request.h b/runtime/include/sandbox_request.h
index ec869ec..0f0a035 100644
--- a/runtime/include/sandbox_request.h
+++ b/runtime/include/sandbox_request.h
@@ -22,7 +22,7 @@ struct sandbox_pre_functions_output {
 };
 struct sandbox_request {
     uint64_t id;
-    bool request_from_outside; /* true is yes, false is no */
+    bool request_from_outside;       /* true is yes, false is no */
     struct module * module;
     char * arguments;
     int socket_descriptor;
@@ -31,12 +31,13 @@ struct sandbox_request {
     uint64_t enqueue_timestamp;      /* cycles */
     uint64_t absolute_deadline;      /* cycles */
    uint64_t last_update_timestamp;  /* cycles */
-    uint64_t remaining_slack;        /* cycles */
-    struct sandbox_pre_functions_output *pre_functions_output;
+    uint64_t remaining_slack;        /* cycles */
+    uint64_t laxity;                 /* cycles */
+    struct sandbox_pre_functions_output *pre_functions_output;
     pthread_spinlock_t lock;
-    char * previous_function_output;
-    ssize_t output_length;
-    ssize_t previous_request_length;
+    char * previous_function_output;
+    ssize_t output_length;
+    ssize_t previous_request_length;
     /*
      * Unitless estimate of the instantaneous fraction of system capacity required to run the request
      * Calculated by estimated execution time (cycles) * runtime_admissions_granularity / relative deadline (cycles)
@@ -82,7 +83,7 @@ sandbox_request_log_allocation(struct sandbox_request *sandbox_request)
 static inline struct sandbox_request *
 sandbox_request_allocate(struct module *module, bool request_from_outside, ssize_t request_length,
                          char *arguments, int socket_descriptor, const struct sockaddr *socket_address,
-                         uint64_t request_arrival_timestamp, uint64_t enqueue_timestamp, uint64_t remaining_slack,
+                         uint64_t request_arrival_timestamp, uint64_t enqueue_timestamp, uint64_t remaining_slack, uint64_t laxity,
                          uint64_t admissions_estimate, char *previous_function_output, ssize_t output_length)
 {
     struct sandbox_request *sandbox_request = (struct sandbox_request *)malloc(sizeof(struct sandbox_request));
@@ -104,6 +105,7 @@ sandbox_request_allocate(struct module *module, bool request_from_outside, ssize
     sandbox_request->previous_request_length = request_length;
     sandbox_request->last_update_timestamp = enqueue_timestamp;
     sandbox_request->remaining_slack = remaining_slack;
+    sandbox_request->laxity = laxity;
 
     /*Avoid pointer suspension*/
     sandbox_request->pre_functions_output = NULL;
@@ -229,4 +231,3 @@ concatenate_outputs(struct sandbox_request *request) {
     request->output_length = total_length;
     request->previous_function_output = concatenated_output;
 }
-
diff --git a/runtime/include/sandbox_set_as_initialized.h b/runtime/include/sandbox_set_as_initialized.h
index f3a6f6a..cfcdf40 100644
--- a/runtime/include/sandbox_set_as_initialized.h
+++ b/runtime/include/sandbox_set_as_initialized.h
@@ -52,7 +52,8 @@ sandbox_set_as_initialized(struct sandbox *sandbox, struct sandbox_request *sand
 
     /* Copy the socket descriptor, address, and arguments of the client invocation */
     sandbox->absolute_deadline = sandbox_request->absolute_deadline;
-    sandbox->remaining_slack = sandbox_request->remaining_slack;
+    sandbox->remaining_slack = sandbox_request->remaining_slack;
+    sandbox->laxity = sandbox_request->laxity;
     sandbox->last_update_timestamp = sandbox_request->last_update_timestamp;
     sandbox->arguments = (void *)sandbox_request->arguments;
     sandbox->client_socket_descriptor = sandbox_request->socket_descriptor;
diff --git a/runtime/include/sandbox_set_as_running.h b/runtime/include/sandbox_set_as_running.h
index 3f23bb7..4a1a3ec 100644
--- a/runtime/include/sandbox_set_as_running.h
+++ b/runtime/include/sandbox_set_as_running.h
@@ -25,11 +25,13 @@ sandbox_set_as_running(struct sandbox *sandbox, sandbox_state_t last_state)
         //uint64_t last = sandbox->last_update_timestamp;
         //uint64_t last_rs = sandbox->remaining_slack;
         sandbox->remaining_slack -= (now - sandbox->last_update_timestamp);
+        sandbox->laxity -= (now - sandbox->last_update_timestamp);
         sandbox->last_update_timestamp = now;
         sandbox->runnable_duration += duration_of_last_state;
         current_sandbox_set(sandbox);
         runtime_worker_threads_deadline[worker_thread_idx] = sandbox->absolute_deadline;
         runtime_worker_threads_remaining_slack[worker_thread_idx] = sandbox->remaining_slack;
+        runtime_worker_threads_laxity[worker_thread_idx] = sandbox->laxity;
         //mem_log("time %lu sandbox starts running, request id:%d name %s obj=%p remaining slack %lu, last_rs %lu now %lu last %lu \n", start_execution,
         //        sandbox->id, sandbox->module->name, sandbox, sandbox->remaining_slack, last_rs, now, last);
         /* Does not handle context switch because the caller knows if we need to use fast or slow switched */
@@ -49,3 +51,5 @@ sandbox_set_as_running(struct sandbox *sandbox, sandbox_state_t last_state)
     sandbox->last_state_change_timestamp = now;
     sandbox->state = SANDBOX_RUNNING;
 }
+
+
diff --git a/runtime/include/sandbox_types.h b/runtime/include/sandbox_types.h
index d19451e..2c25a17 100644
--- a/runtime/include/sandbox_types.h
+++ b/runtime/include/sandbox_types.h
@@ -54,8 +54,9 @@ struct sandbox {
     uint64_t response_timestamp;          /* Timestamp when response is sent */
     uint64_t completion_timestamp;        /* Timestamp when sandbox runs to completion */
     uint64_t last_state_change_timestamp; /* Used for bookkeeping of actual execution time */
-    uint64_t last_update_timestamp;       /* Used for bookkeeping timestamp for SRSF */
+    uint64_t last_update_timestamp;       /* Used for bookkeeping timestamp for SRSF && LLF */
     uint64_t remaining_slack;             /* Cycles */
+    uint64_t laxity;                      /* Cycles */
 #ifdef LOG_SANDBOX_MEMORY_PROFILE
     uint32_t page_allocation_timestamps[SANDBOX_PAGE_ALLOCATION_TIMESTAMP_COUNT];
     size_t page_allocation_timestamps_size;
diff --git a/runtime/include/scheduler.h b/runtime/include/scheduler.h
index 4a18810..5409b01 100644
--- a/runtime/include/scheduler.h
+++ b/runtime/include/scheduler.h
@@ -29,7 +29,8 @@ enum SCHEDULER
     SCHEDULER_FIFO = 0,
     SCHEDULER_EDF = 1,
     SCHEDULER_SRSF = 2,
-    SCHEDULER_MDL = 3
+    SCHEDULER_MDL = 3,
+    SCHEDULER_LLF = 4,
 };
 
 extern enum SCHEDULER scheduler;
@@ -154,6 +155,45 @@ err_allocate:
     goto done;
 }
 
+static inline struct sandbox *
+scheduler_LLF_get_next()
+{
+    /* Get the deadline of the sandbox at the head of the local request queue */
+    struct sandbox *local = local_runqueue_get_next();
+    uint64_t local_Laxity = local == NULL ? UINT64_MAX : local->remaining_slack;
+    struct sandbox_request *request = NULL;
+
+    uint64_t global_local_Laxity = global_request_scheduler_peek();
+
+    /* Try to pull and allocate from the global queue if earlier
+     * This will be placed at the head of the local runqueue */
+    if (global_local_Laxity < local_Laxity && (local_workload_count <=2 || local_runqueue_count == 0)) {
+        if (global_request_scheduler_remove_if_earlier(&request, local_Laxity) == 0) {
+
+            //uint64_t pop_time = __getcycles() - system_start_timestamp;
+            //mem_log("time %lu remove from GQ, request id:%d name %s remaining slack %lu\n", pop_time,
+            //        request->id, request->module->name, request->remaining_slack);
+
+            assert(request != NULL);
+            struct sandbox *global = sandbox_allocate(request);
+            if (!global) goto err_allocate;
+
+            assert(global->state == SANDBOX_INITIALIZED);
+            sandbox_set_as_runnable(global, SANDBOX_INITIALIZED);
+        }
+    }
+
+/* Return what is at the head of the local runqueue or NULL if empty */
+done:
+    return local_runqueue_get_next();
+err_allocate:
+    client_socket_send(request->socket_descriptor, 503);
+    client_socket_close(request->socket_descriptor, &request->socket_address);
+    free(request);
+    request = NULL;
+    goto done;
+}
+
 static inline struct sandbox *
 scheduler_fifo_get_next()
 {
@@ -200,6 +240,8 @@ scheduler_get_next()
         return scheduler_fifo_get_next();
     case SCHEDULER_MDL:
         return scheduler_MDL_get_next();
+    case SCHEDULER_LLF:
+        return scheduler_LLF_get_next();
     default:
         panic("Unimplemented\n");
     }
@@ -218,6 +260,9 @@ scheduler_initialize()
     case SCHEDULER_MDL:
         global_request_scheduler_minheap_initialize(SCHEDULER_MDL);
         break;
+    case SCHEDULER_LLF:
+        global_request_scheduler_minheap_initialize(SCHEDULER_LLF);
+        break;
     case SCHEDULER_FIFO:
         global_request_scheduler_deque_initialize();
         break;
@@ -238,6 +283,10 @@ scheduler_runqueue_initialize()
         break;
     case SCHEDULER_MDL:
         local_runqueue_minheap_initialize(SCHEDULER_MDL);
+        break;
+    case SCHEDULER_LLF:
+        local_runqueue_minheap_initialize(SCHEDULER_LLF);
+        break;
     case SCHEDULER_FIFO:
         local_runqueue_list_initialize();
         break;
@@ -263,8 +312,8 @@ scheduler_preempt(ucontext_t *user_context)
     struct sandbox *current = current_sandbox_get();
     assert(current != NULL);
     assert(current->state == SANDBOX_RUNNING);
-
-    if (current-> remaining_slack <= 5000 * runtime_processor_speed_MHz) {
+    uint64_t RR_least_time = 5000 * runtime_processor_speed_MHz;
+    if (current-> remaining_slack <= RR_least_time || current->laxity <= RR_least_time) {
         return;
     }
     /* This is for better state-change bookkeeping */
@@ -340,10 +389,12 @@ scheduler_print(enum SCHEDULER variant)
         return "FIFO";
     case SCHEDULER_EDF:
         return "EDF";
-    case SCHEDULER_MDL:
-        return "MDL";
     case SCHEDULER_SRSF:
         return "SRSF";
+    case SCHEDULER_MDL:
+        return "MDL";
+    case SCHEDULER_LLF:
+        return "LLF";
     }
 }
 
diff --git a/runtime/src/current_sandbox.c b/runtime/src/current_sandbox.c
index 51dc6ca..d9d431d 100644
--- a/runtime/src/current_sandbox.c
+++ b/runtime/src/current_sandbox.c
@@ -58,6 +58,13 @@ current_sandbox_disable_preemption(struct sandbox *sandbox)
     }
 }
 
+static inline void
+current_sandbox_get_newlaxity(struct sandbox *sandbox, uint64_t now)
+{
+    assert(sandbox);
+    sandbox->remaining_slack -= (now - sandbox->last_update_timestamp);
+}
+
 /**
  * Sandbox execution logic
  * Handles setup, request parsing, WebAssembly initialization, function execution, response building and
@@ -170,13 +177,14 @@ current_sandbox_start(void)
                 goto err;
             }
             memcpy(individual_pre_func_output, pre_func_output, output_length);
-            uint64_t enqueue_timestamp = __getcycles();
+            uint64_t now = __getcycles();
+            current_sandbox_get_newlaxity(sandbox, now);
            struct sandbox_request *sandbox_request =
               sandbox_request_allocate(next_module_node, false, sandbox->request_length,
                                        next_module_node->name, sandbox->client_socket_descriptor,
                                        (const struct sockaddr *)&sandbox->client_address,
-                                       sandbox->request_arrival_timestamp, enqueue_timestamp,
-                                       sandbox->remaining_slack, true, NULL, 0);
+                                       sandbox->request_arrival_timestamp, now,
+                                       sandbox->remaining_slack, sandbox->laxity, true, NULL, 0);
             /* TODO: All sandboxs in the chain share the same request id, but sandbox_request_allocate()
              * will busy-wait to generate an unique id, should we optimize it here?
              */
@@ -242,15 +250,16 @@ current_sandbox_start(void)
         uint64_t *request_id = (uint64_t *)map_get(sandbox_request_id, cur_request_id, strlen(cur_request_id), &ret_value_len);
         bool mapflag = false;
         if (!request_id) {
-            uint64_t enqueue_timestamp = __getcycles();
+            uint64_t now = __getcycles();
+            current_sandbox_get_newlaxity(sandbox, now);
             //mem_log("time %lu request id:%d executing, name:%s remaining slack %lu\n", current_rs,
             //        sandbox->id, sandbox->module->name, sandbox->remaining_slack);
             struct sandbox_request *sandbox_request =
               sandbox_request_allocate(next_module_node, false, sandbox->request_length,
                                        next_module_node->name, sandbox->client_socket_descriptor,
                                        (const struct sockaddr *)&sandbox->client_address,
-                                       sandbox->request_arrival_timestamp, enqueue_timestamp,
-                                       sandbox->remaining_slack, true, NULL, 0);
+                                       sandbox->request_arrival_timestamp, now,
+                                       sandbox->remaining_slack,sandbox->laxity, true, NULL, 0);
             /* TODO: All sandboxs in the chain share the same request id, but sandbox_request_allocate()
              * will busy-wait to generate an unique id, should we optimize it here?
              */
diff --git a/runtime/src/global_request_scheduler_minheap.c b/runtime/src/global_request_scheduler_minheap.c
index c507afc..fb60446 100644
--- a/runtime/src/global_request_scheduler_minheap.c
+++ b/runtime/src/global_request_scheduler_minheap.c
@@ -89,6 +89,16 @@ sandbox_request_get_priority_mdl_fn(void *element)
     return remaining_slack_mdl;
 };
 
+uint64_t
+sandbox_request_get_priority_llf_fn(void *element)
+{
+    struct sandbox_request *sandbox_request = (struct sandbox_request *)element;
+    uint64_t now = __getcycles();
+    uint64_t Laxity_llf = sandbox_request->laxity - (now - sandbox_request->last_update_timestamp);
+    return Laxity_llf;
+};
+
+
 /**
  * Initializes the variant and registers against the polymorphic interface
 */
@@ -101,6 +111,8 @@ global_request_scheduler_minheap_initialize(enum SCHEDULER scheduler)
         global_request_scheduler_minheap = priority_queue_initialize(40960, true, sandbox_request_get_priority_srsf_fn);
     } else if (scheduler == SCHEDULER_MDL) {
         global_request_scheduler_minheap = priority_queue_initialize(40960, true, sandbox_request_get_priority_mdl_fn);
+    } else if (scheduler == SCHEDULER_LLF) {
+        global_request_scheduler_minheap = priority_queue_initialize(40960, true, sandbox_request_get_priority_llf_fn);
     }
 
     struct global_request_scheduler_config config = {
diff --git a/runtime/src/listener_thread.c b/runtime/src/listener_thread.c
index 107b41b..c0b3042 100644
--- a/runtime/src/listener_thread.c
+++ b/runtime/src/listener_thread.c
@@ -186,10 +186,10 @@ listener_thread_main(void *dummy)
 
             while (rear != front) {
                 struct module *current_module = queue[front++];
-                if (scheduler == SCHEDULER_SRSF || scheduler == SCHEDULER_EDF)
+                if (scheduler == SCHEDULER_SRSF || scheduler == SCHEDULER_EDF || scheduler == SCHEDULER_LLF)
                 {
                     estimated_execution_time += admission_info_get_percentile(&current_module->admissions_info);
-                }else if (scheduler == SCHEDULER_MDL){
+                }else if (scheduler == SCHEDULER_MDL ){
                    /* Set a baseline model, and if the predicted parameters of the model are more than 1.2 times the parameters of the baseline, then select the baseline parameters */
                     uint64_t estimated_execution_base_percentile = 0, estimated_execution_model = 0;
                     estimated_execution_base_percentile = admission_info_get_percentile(&current_module->admissions_info);
@@ -219,7 +219,7 @@ listener_thread_main(void *dummy)
             }
         }
         /* Adding system start timestamp to avoid negative remaining slack in the following update. They are all cycles */
-        uint64_t remaining_slack = system_start_timestamp + module->relative_deadline - estimated_execution_time;
+        uint64_t remaining_slack = system_start_timestamp + module->relative_deadline - estimated_execution_time;
 
         uint64_t request_arrival_timestamp = __getcycles();
 
@@ -227,7 +227,7 @@ listener_thread_main(void *dummy)
         struct sandbox_request *sandbox_request =
           sandbox_request_allocate(module, true, 0, module->name, client_socket,
                                    (const struct sockaddr *)&client_address,
-                                   request_arrival_timestamp, request_arrival_timestamp,remaining_slack,
+                                   request_arrival_timestamp, request_arrival_timestamp,remaining_slack, remaining_slack,
                                    work_admitted, NULL, 0);
 
         /* Add to the Global Sandbox Request Scheduler */
diff --git a/runtime/src/local_runqueue_minheap.c b/runtime/src/local_runqueue_minheap.c
index 583d6d6..6bde304 100644
--- a/runtime/src/local_runqueue_minheap.c
+++ b/runtime/src/local_runqueue_minheap.c
@@ -84,7 +84,9 @@ local_runqueue_minheap_initialize(enum SCHEDULER scheduler)
         local_runqueue_minheap = priority_queue_initialize(10240, false, sandbox_get_srsf_priority);
     } else if (scheduler == SCHEDULER_MDL) {
         local_runqueue_minheap = priority_queue_initialize(10240, false, sandbox_get_mdl_priority);
-    } else {
+    } else if (scheduler == SCHEDULER_LLF) {
+        local_runqueue_minheap = priority_queue_initialize(10240, false, sandbox_get_llf_priority);
+    } else{
         panic("Invalid scheduler type %d\n", scheduler);
     }
 
diff --git a/runtime/src/main.c b/runtime/src/main.c
index bd443b2..415be7c 100644
--- a/runtime/src/main.c
+++ b/runtime/src/main.c
@@ -188,6 +188,9 @@ runtime_configure()
         scheduler = SCHEDULER_SRSF;
     } else if (strcmp(scheduler_policy, "MDL") == 0) {
         scheduler = SCHEDULER_MDL;
+    } else if (strcmp(scheduler_policy, "LLF") == 0)
+    {
+        scheduler = SCHEDULER_LLF;
     } else {
         panic("Invalid scheduler policy: %s. Must be {EDF|FIFO}\n", scheduler_policy);
     }
@@ -199,7 +202,7 @@ runtime_configure()
     if (strcmp(sigalrm_policy, "BROADCAST") == 0) {
         runtime_sigalrm_handler = RUNTIME_SIGALRM_HANDLER_BROADCAST;
     } else if (strcmp(sigalrm_policy, "TRIAGED") == 0) {
-        if (unlikely(scheduler != SCHEDULER_EDF && scheduler != SCHEDULER_SRSF && scheduler != SCHEDULER_MDL)) panic("triaged sigalrm handlers are only valid with EDF and SRSF\n");
+        if (unlikely(scheduler != SCHEDULER_EDF && scheduler != SCHEDULER_SRSF && scheduler != SCHEDULER_MDL && scheduler != SCHEDULER_LLF)) panic("triaged sigalrm handlers are only valid with EDF and SRSF\n");
         runtime_sigalrm_handler = RUNTIME_SIGALRM_HANDLER_TRIAGED;
     } else {
         panic("Invalid sigalrm policy: %s. Must be {BROADCAST|TRIAGED}\n", sigalrm_policy);
diff --git a/runtime/src/runtime.c b/runtime/src/runtime.c
index e4afa0f..21a2818 100644
--- a/runtime/src/runtime.c
+++ b/runtime/src/runtime.c
@@ -33,6 +33,7 @@ int runtime_worker_threads_argument[RUNTIME_WORKER_THREAD_CORE_COUNT] = {
 /* The active deadline of the sandbox running on each worker thread */
 uint64_t runtime_worker_threads_deadline[RUNTIME_WORKER_THREAD_CORE_COUNT] = { UINT64_MAX };
 uint64_t runtime_worker_threads_remaining_slack[RUNTIME_WORKER_THREAD_CORE_COUNT] = { UINT64_MAX };
+uint64_t runtime_worker_threads_laxity[RUNTIME_WORKER_THREAD_CORE_COUNT] = { UINT64_MAX };
 
 /******************************************
  * Shared Process / Listener Thread Logic *
diff --git a/runtime/src/software_interrupt.c b/runtime/src/software_interrupt.c
index 9c2cc3b..9b3e2ef 100644
--- a/runtime/src/software_interrupt.c
+++ b/runtime/src/software_interrupt.c
@@ -99,6 +99,11 @@ sigalrm_propagate_workers(siginfo_t *signal_info)
                 uint64_t global_slack = global_request_scheduler_peek();
                 if (global_slack < local_remaining_slack) pthread_kill(runtime_worker_threads[i], SIGALRM);
                 continue;
+            } else if (scheduler == SCHEDULER_LLF) {
+                uint64_t local_Laxity = runtime_worker_threads_laxity[i];
+                uint64_t global_Laxity = global_request_scheduler_peek();
+                if (global_Laxity < local_Laxity) pthread_kill(runtime_worker_threads[i], SIGALRM);
+                continue;
+            }
         }
     case RUNTIME_SIGALRM_HANDLER_BROADCAST: {
diff --git a/runtime/tests/start-edf.sh b/runtime/tests/start-edf.sh
index 757b3ec..d593ee2 100755
--- a/runtime/tests/start-edf.sh
+++ b/runtime/tests/start-edf.sh
@@ -20,10 +20,10 @@ declare project_path="$(
 echo $project_path
 path=`pwd`
 #export SLEDGE_DISABLE_PREEMPTION=true
-export SLEDGE_CPU_SPEED=3300
-#export SLEDGE_SCHEDULER=SRSF
-export SLEDGE_SIGALRM_HANDLER=BROADCAST
-#export SLEDGE_SIGALRM_HANDLER=TRIAGED
+#export SLEDGE_CPU_SPEED=3300
+export SLEDGE_SCHEDULER=EDF
+#export SLEDGE_SIGALRM_HANDLER=BROADCAST
+export SLEDGE_SIGALRM_HANDLER=TRIAGED
 #export SLEDGE_NWORKERS=1
 export SLEDGE_SCHEDULER=EDF
 export SLEDGE_SANDBOX_PERF_LOG=$path/$output
@@ -37,7 +37,8 @@ cd $project_path/runtime/bin
 #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/mulitple_linear_chain.json
 #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing.json
 #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing3.json
-LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing4.json
 #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_fibonacci.json
 #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_sodresize.json
 #LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/my_sodresize.json
+#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing4.json
+LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_multiple_image_processing_graph.json
\ No newline at end of file
diff --git a/runtime/tests/start.sh b/runtime/tests/start.sh
index 32008b9..e67585f 100755
--- a/runtime/tests/start.sh
+++ b/runtime/tests/start.sh
@@ -21,7 +21,7 @@ echo $project_path
 path=`pwd`
 #export SLEDGE_DISABLE_PREEMPTION=true
 export SLEDGE_CPU_SPEED=2400
-export SLEDGE_SCHEDULER=MDL
+export SLEDGE_SCHEDULER=LLF
 export SLEDGE_SIGALRM_HANDLER=TRIAGED
 #export SLEDGE_SIGALRM_HANDLER=TRIAGED
 #export SLEDGE_NWORKERS=1
diff --git a/runtime/tests/start_noop1.sh b/runtime/tests/start_noop.sh
similarity index 84%
rename from runtime/tests/start_noop1.sh
rename to runtime/tests/start_noop.sh
index 09dbee9..efd2cd7 100755
--- a/runtime/tests/start_noop1.sh
+++ b/runtime/tests/start_noop.sh
@@ -22,11 +22,11 @@ path=`pwd`
 #export SLEDGE_DISABLE_PREEMPTION=true
 export SLEDGE_CPU_SPEED=2500
 export SLEDGE_SCHEDULER=FIFO
-export SLEDGE_SIGALRM_HANDLER=BROADCAST
+#xport SLEDGE_SIGALRM_HANDLER=BROADCAST
 #export SLEDGE_SIGALRM_HANDLER=TRIAGED
 #export SLEDGE_NWORKERS=16
 #export SLEDGE_SCHEDULER=EDF
 export SLEDGE_SANDBOX_PERF_LOG=$path/$output
 echo $SLEDGE_SANDBOX_PERF_LOG
 cd $project_path/runtime/bin
-LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_noop1.json
\ No newline at end of file
+#LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH" ./sledgert ../tests/test_noop1.json
\ No newline at end of file
diff --git a/runtime/tests/test_max_rps.sh b/runtime/tests/test_max_rps.sh
new file mode 100755
index 0000000..3042499
--- /dev/null
+++ b/runtime/tests/test_max_rps.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+function usage {
+    echo "Please enter right parameters: current_rps(*5) add_step(*5) duratime"
+    exit 1
+}
+
+if [ $# -ne 3 ]; then
+    usage
+fi
+
+echo "current_rps(*5) add_step(*5) duratime"
+
+#path="/home/njl/sledge/runtime/tests"
+path="/home/hai/sledge/sledge/runtime/tests"
+
+current_rps=$1
+step=$2
+duratime=$3
+max_rps=0
+max_latency=0
+
+output="hey_test_max_rps.log"
+server_log_file="test_rps.log"
+
+
+loop=1
+
+for loop in {1..5}; do
+    $path/start-edf.sh $server_log_file >/dev/null 2>&1 &
+    echo "sledge is running loop $loop"
+
+    ./test_rps.sh $output $duratime $current_rps 5k.jpg 10000 2>&1 &
+    pid1=$!
+    wait $pid1
+
+    $path/kill_sledge.sh
+    latency=$(grep "Requests" $output | awk -F ': ' '{print $2}')
+
+    if (( $(echo "$latency < $max_rps" | bc -l) )); then
+        break
+    fi
+
+    echo "loop_$loop RPS: $latency"
+    max_rps=$latency
+    current_rps=$((current_rps + step))
+done
+
+echo "Maximum RPS: $max_rps"
+
diff --git a/runtime/tests/test_rps.sh b/runtime/tests/test_rps.sh
index 8e86d14..c904f35 100755
--- a/runtime/tests/test_rps.sh
+++ b/runtime/tests/test_rps.sh
@@ -16,6 +16,6 @@ port=$5
 
 echo "hey test"
 
-hey -disable-compression -disable-keepalive -disable-redirects -c 5 -q $rps -z $duration\s -t 0 -m GET -D "$image" "http://10.16.109.192:$port" > $output
+hey -disable-compression -disable-keepalive -disable-redirects -c 5 -q $rps -z $duration\s -t 0 -m GET -D "$image" "http://127.0.0.1:$port" > $output
 #loadtest -c 5 --rps $rps -t $duration --method GET --data @$image "http://10.16.109.192:$port" > $output
 #hey -disable-compression -disable-keepalive -disable-redirects -c 8 -q 50 -z $duration\s -t 0 -m GET -D "$image" "http://10.10.1.1:$port" > $output
\ No newline at end of file
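
Note on the LLF priority calculation (reviewer sketch, not part of the patch): both sandbox_get_llf_priority() and sandbox_request_get_priority_llf_fn() rank entries by how much of their admitted laxity is left, i.e. laxity minus the cycles elapsed since last_update_timestamp. The standalone sketch below shows that computation; the struct and function names are simplified stand-ins for the runtime's real types, and the saturating guard against unsigned underflow is an assumption of this sketch, the patch itself performs the raw uint64_t subtraction, which can wrap to a very large value once a sandbox has exhausted its slack.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the laxity bookkeeping fields the patch adds. */
struct sandbox_stub {
	uint64_t laxity;                /* cycles of slack granted at admission */
	uint64_t last_update_timestamp; /* cycles, set when laxity was last refreshed */
};

/* Lower remaining laxity == higher priority in the min-heaps.
 * Saturate at zero here; the patch does the unguarded subtraction. */
static uint64_t
llf_priority(const struct sandbox_stub *s, uint64_t now)
{
	uint64_t elapsed = now - s->last_update_timestamp;
	return elapsed >= s->laxity ? 0 : s->laxity - elapsed;
}

int
main(void)
{
	struct sandbox_stub s = { .laxity = 10000, .last_update_timestamp = 500 };
	/* 2000 cycles have elapsed, so 8000 cycles of laxity remain. */
	printf("remaining laxity: %" PRIu64 " cycles\n", llf_priority(&s, 2500));
	return 0;
}

The same comparison drives both queues: the listener seeds laxity from remaining_slack at admission, sandbox_set_as_running() decrements it as the sandbox runs, and sigalrm_propagate_workers() preempts a worker whenever the global queue's head has less laxity than the worker's current sandbox.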